Nov 26 06:54:57 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 26 06:54:57 crc restorecon[4807]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 06:54:57 crc restorecon[4807]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc 
restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:54:57 crc 
restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc 
restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc 
restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 
crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 
06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 
06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:57 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:54:58 crc 
restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:54:58 crc restorecon[4807]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 26 06:54:58 crc kubenswrapper[4940]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 06:54:58 crc kubenswrapper[4940]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 26 06:54:58 crc kubenswrapper[4940]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 06:54:58 crc kubenswrapper[4940]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 26 06:54:58 crc kubenswrapper[4940]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 26 06:54:58 crc kubenswrapper[4940]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.912762 4940 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918327 4940 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918340 4940 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918345 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918350 4940 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918355 4940 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918359 4940 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918363 4940 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918367 4940 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918370 4940 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918374 4940 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918378 4940 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918381 4940 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918386 4940 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918391 4940 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918395 4940 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918399 4940 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918402 4940 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918407 4940 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918411 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918416 4940 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918419 4940 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918423 4940 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918426 4940 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918430 4940 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918433 4940 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918437 4940 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918440 4940 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918444 4940 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918448 4940 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918452 4940 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918456 4940 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918460 4940 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918463 4940 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918467 4940 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918470 4940 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918474 4940 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918478 4940 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918481 4940 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918485 4940 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918488 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918492 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918495 4940 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918499 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918502 4940 feature_gate.go:330] unrecognized 
feature gate: InsightsConfigAPI Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918506 4940 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918511 4940 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918515 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918518 4940 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918522 4940 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918525 4940 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918529 4940 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918532 4940 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918535 4940 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918539 4940 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918542 4940 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918546 4940 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918549 4940 feature_gate.go:330] unrecognized feature gate: Example Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918553 4940 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918556 4940 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918560 4940 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918563 4940 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918568 4940 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
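[Editor's note] The wall of "unrecognized feature gate" warnings above covers the gates OpenShift layers on top of upstream Kubernetes: the kubelet's own registry only knows upstream gates, so cluster-level OpenShift names (GatewayAPI, NewOLM, PinnedImages, ...) fall through to the feature_gate.go:330 warning, while recognized-but-locked gates get the GA/deprecated notices at feature_gate.go:353 and :351. A condensed, hypothetical stand-in for that dispatch follows; the real registry lives in k8s.io/component-base/featuregate and this sketch only mimics the warning text seen here.

```go
// Sketch of the three warning branches visible above: unknown gate,
// GA gate, deprecated gate. Not the real featuregate implementation.
package main

import "log"

type stage int

const (
	alpha stage = iota
	beta
	ga
	deprecated
)

// known is a tiny slice of the upstream registry; the real one is
// generated per Kubernetes release.
var known = map[string]stage{
	"ValidatingAdmissionPolicy":              ga,
	"CloudDualStackNodeIPs":                  ga,
	"DisableKubeletCloudCredentialProviders": ga,
	"KMSv1":                                  deprecated,
	"NodeSwap":                               beta,
}

func set(gates map[string]bool) {
	for name, enabled := range gates {
		st, ok := known[name]
		switch {
		case !ok:
			log.Printf("unrecognized feature gate: %s", name)
		case st == ga:
			log.Printf("Setting GA feature gate %s=%t. It will be removed in a future release.", name, enabled)
		case st == deprecated:
			log.Printf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", name, enabled)
		}
	}
}

func main() {
	set(map[string]bool{
		"GatewayAPI":            true, // OpenShift-level gate: unrecognized here
		"KMSv1":                 true, // deprecated upstream gate
		"CloudDualStackNodeIPs": true, // GA upstream gate
	})
}
```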
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918572 4940 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918576 4940 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918581 4940 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918584 4940 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918588 4940 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918592 4940 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918595 4940 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918599 4940 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.918602 4940 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918674 4940 flags.go:64] FLAG: --address="0.0.0.0" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918682 4940 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918688 4940 flags.go:64] FLAG: --anonymous-auth="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918693 4940 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918698 4940 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918704 4940 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918710 4940 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918715 4940 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918720 4940 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918724 4940 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918730 4940 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918734 4940 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918738 4940 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918742 4940 flags.go:64] FLAG: --cgroup-root="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918746 4940 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918750 4940 flags.go:64] FLAG: --client-ca-file="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918754 4940 flags.go:64] FLAG: --cloud-config="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918758 4940 flags.go:64] FLAG: --cloud-provider="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918762 4940 flags.go:64] FLAG: --cluster-dns="[]" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 
06:54:58.918769 4940 flags.go:64] FLAG: --cluster-domain="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918773 4940 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918777 4940 flags.go:64] FLAG: --config-dir="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918781 4940 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918785 4940 flags.go:64] FLAG: --container-log-max-files="5" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918791 4940 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918795 4940 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918799 4940 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918804 4940 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918808 4940 flags.go:64] FLAG: --contention-profiling="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918812 4940 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918816 4940 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918821 4940 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918825 4940 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918830 4940 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918835 4940 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918839 4940 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918843 4940 flags.go:64] FLAG: --enable-load-reader="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918847 4940 flags.go:64] FLAG: --enable-server="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918853 4940 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918858 4940 flags.go:64] FLAG: --event-burst="100" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918862 4940 flags.go:64] FLAG: --event-qps="50" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918866 4940 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918870 4940 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918874 4940 flags.go:64] FLAG: --eviction-hard="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918879 4940 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918884 4940 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918888 4940 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918893 4940 flags.go:64] FLAG: --eviction-soft="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918897 4940 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 26 06:54:58 crc 
kubenswrapper[4940]: I1126 06:54:58.918901 4940 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918905 4940 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918909 4940 flags.go:64] FLAG: --experimental-mounter-path="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918913 4940 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918917 4940 flags.go:64] FLAG: --fail-swap-on="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918921 4940 flags.go:64] FLAG: --feature-gates="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918926 4940 flags.go:64] FLAG: --file-check-frequency="20s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918931 4940 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918935 4940 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918939 4940 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918943 4940 flags.go:64] FLAG: --healthz-port="10248" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918947 4940 flags.go:64] FLAG: --help="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918951 4940 flags.go:64] FLAG: --hostname-override="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918955 4940 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918959 4940 flags.go:64] FLAG: --http-check-frequency="20s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918963 4940 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918967 4940 flags.go:64] FLAG: --image-credential-provider-config="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918972 4940 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918976 4940 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918980 4940 flags.go:64] FLAG: --image-service-endpoint="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918984 4940 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918989 4940 flags.go:64] FLAG: --kube-api-burst="100" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918993 4940 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.918998 4940 flags.go:64] FLAG: --kube-api-qps="50" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919002 4940 flags.go:64] FLAG: --kube-reserved="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919006 4940 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919010 4940 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919014 4940 flags.go:64] FLAG: --kubelet-cgroups="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919019 4940 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919023 4940 flags.go:64] FLAG: --lock-file="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 
06:54:58.919027 4940 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919031 4940 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919048 4940 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919056 4940 flags.go:64] FLAG: --log-json-split-stream="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919060 4940 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919065 4940 flags.go:64] FLAG: --log-text-split-stream="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919069 4940 flags.go:64] FLAG: --logging-format="text" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919073 4940 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919078 4940 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919082 4940 flags.go:64] FLAG: --manifest-url="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919086 4940 flags.go:64] FLAG: --manifest-url-header="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919092 4940 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919096 4940 flags.go:64] FLAG: --max-open-files="1000000" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919101 4940 flags.go:64] FLAG: --max-pods="110" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919105 4940 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919110 4940 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919114 4940 flags.go:64] FLAG: --memory-manager-policy="None" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919118 4940 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919122 4940 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919126 4940 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919131 4940 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919141 4940 flags.go:64] FLAG: --node-status-max-images="50" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919146 4940 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919150 4940 flags.go:64] FLAG: --oom-score-adj="-999" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919155 4940 flags.go:64] FLAG: --pod-cidr="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919159 4940 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919165 4940 flags.go:64] FLAG: --pod-manifest-path="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919169 4940 flags.go:64] FLAG: --pod-max-pids="-1" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919173 4940 flags.go:64] FLAG: --pods-per-core="0" Nov 26 
06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919178 4940 flags.go:64] FLAG: --port="10250" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919182 4940 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919186 4940 flags.go:64] FLAG: --provider-id="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919190 4940 flags.go:64] FLAG: --qos-reserved="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919194 4940 flags.go:64] FLAG: --read-only-port="10255" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919198 4940 flags.go:64] FLAG: --register-node="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919202 4940 flags.go:64] FLAG: --register-schedulable="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919207 4940 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919214 4940 flags.go:64] FLAG: --registry-burst="10" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919219 4940 flags.go:64] FLAG: --registry-qps="5" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919224 4940 flags.go:64] FLAG: --reserved-cpus="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919228 4940 flags.go:64] FLAG: --reserved-memory="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919233 4940 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919237 4940 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919241 4940 flags.go:64] FLAG: --rotate-certificates="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919246 4940 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919250 4940 flags.go:64] FLAG: --runonce="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919254 4940 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919258 4940 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919262 4940 flags.go:64] FLAG: --seccomp-default="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919266 4940 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919270 4940 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919275 4940 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919279 4940 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919283 4940 flags.go:64] FLAG: --storage-driver-password="root" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919288 4940 flags.go:64] FLAG: --storage-driver-secure="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919292 4940 flags.go:64] FLAG: --storage-driver-table="stats" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919296 4940 flags.go:64] FLAG: --storage-driver-user="root" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919300 4940 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919304 4940 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 
06:54:58.919309 4940 flags.go:64] FLAG: --system-cgroups="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919313 4940 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919320 4940 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919324 4940 flags.go:64] FLAG: --tls-cert-file="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919328 4940 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919334 4940 flags.go:64] FLAG: --tls-min-version="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919338 4940 flags.go:64] FLAG: --tls-private-key-file="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919342 4940 flags.go:64] FLAG: --topology-manager-policy="none" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919346 4940 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919354 4940 flags.go:64] FLAG: --topology-manager-scope="container" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919358 4940 flags.go:64] FLAG: --v="2" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919364 4940 flags.go:64] FLAG: --version="false" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919370 4940 flags.go:64] FLAG: --vmodule="" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919375 4940 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919379 4940 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919477 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919482 4940 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919487 4940 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919491 4940 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919495 4940 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919500 4940 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919504 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919508 4940 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919512 4940 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919515 4940 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919519 4940 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919522 4940 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919526 4940 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 06:54:58 crc 
kubenswrapper[4940]: W1126 06:54:58.919530 4940 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919534 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919538 4940 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919541 4940 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919545 4940 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919548 4940 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919552 4940 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919556 4940 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919559 4940 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919563 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919567 4940 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919570 4940 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919574 4940 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919579 4940 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919583 4940 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919587 4940 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919592 4940 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919596 4940 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919600 4940 feature_gate.go:330] unrecognized feature gate: Example Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919604 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919608 4940 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919613 4940 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
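[Editor's note] Stepping back to the flags.go:64 lines above: once parsing is done, the kubelet dumps every registered flag with its effective value, which is why the FLAG: list comes out alphabetized. The same pattern in a few lines of standard-library Go (the kubelet itself uses spf13/pflag, but the shape is identical); the two flags registered here are stand-ins, not the kubelet's.

```go
// Sketch of the "FLAG: --name=value" dump seen above: after parsing,
// visit every registered flag and log it.
package main

import (
	"flag"
	"log"
)

func main() {
	// A couple of stand-in flags; the kubelet registers hundreds.
	v := flag.Int("v", 2, "log verbosity")
	nodeIP := flag.String("node-ip", "", "node IP address")
	flag.Parse()

	// VisitAll walks all registered flags, set or not, in lexicographic
	// order -- hence the alphabetized dump in the log above.
	flag.VisitAll(func(f *flag.Flag) {
		log.Printf("FLAG: --%s=%q", f.Name, f.Value.String())
	})

	_, _ = v, nodeIP
}
```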
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919618 4940 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919622 4940 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919627 4940 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919631 4940 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919634 4940 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919639 4940 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919642 4940 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919646 4940 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919651 4940 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919655 4940 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919659 4940 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919663 4940 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919667 4940 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919670 4940 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919674 4940 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919678 4940 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919681 4940 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919686 4940 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919690 4940 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919694 4940 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919698 4940 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919703 4940 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919707 4940 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919713 4940 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919717 4940 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919721 4940 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919725 4940 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919728 4940 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919732 4940 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919736 4940 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919739 4940 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919743 4940 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919747 4940 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919750 4940 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919754 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.919758 4940 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.919771 4940 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.931548 4940 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.931775 4940 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931890 4940 feature_gate.go:330] unrecognized feature gate: Example Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931902 4940 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931911 4940 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931921 4940 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931931 4940 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931942 4940 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. 
It will be removed in a future release. Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931952 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931961 4940 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931969 4940 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931977 4940 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931985 4940 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.931995 4940 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932007 4940 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932018 4940 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932027 4940 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932065 4940 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932079 4940 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932090 4940 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932099 4940 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932108 4940 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932116 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932124 4940 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932132 4940 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932140 4940 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932147 4940 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932155 4940 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932163 4940 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932172 4940 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932180 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932188 4940 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932196 4940 feature_gate.go:330] unrecognized feature gate: 
AutomatedEtcdBackup Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932204 4940 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932212 4940 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932220 4940 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932230 4940 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932239 4940 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932246 4940 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932254 4940 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932262 4940 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932270 4940 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932278 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932286 4940 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932293 4940 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932302 4940 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932310 4940 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932352 4940 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932364 4940 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932373 4940 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932381 4940 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932390 4940 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932398 4940 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932407 4940 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932415 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932424 4940 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932433 4940 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932442 4940 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 06:54:58 crc 
kubenswrapper[4940]: W1126 06:54:58.932450 4940 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932461 4940 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932471 4940 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932480 4940 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932489 4940 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932497 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932505 4940 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932514 4940 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932522 4940 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932530 4940 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932538 4940 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932546 4940 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932554 4940 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932561 4940 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932570 4940 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.932583 4940 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932793 4940 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932806 4940 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932815 4940 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932824 4940 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932832 4940 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932840 4940 feature_gate.go:330] unrecognized feature gate: Example Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932850 4940 feature_gate.go:353] Setting GA feature gate 
ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932862 4940 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932871 4940 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932881 4940 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932889 4940 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932897 4940 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932905 4940 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932913 4940 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932922 4940 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932932 4940 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932941 4940 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932950 4940 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932958 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932966 4940 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932974 4940 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932984 4940 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
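[Editor's note] Each warning pass ends with a feature_gate.go:386 line printing the resolved gate map, and only upstream Kubernetes gates survive into it; none of the OpenShift names appear, having been dropped with the warnings above. A sketch of that final state with a lookup helper, entries copied verbatim from the map logged earlier in this section:

```go
// Sketch: the resolved map from the feature_gate.go:386 lines, held as a
// plain map with a lookup helper. Entries are copied from this log.
package main

import "fmt"

var effective = map[string]bool{
	"CloudDualStackNodeIPs":                  true,
	"DisableKubeletCloudCredentialProviders": true,
	"DynamicResourceAllocation":              false,
	"EventedPLEG":                            false,
	"KMSv1":                                  true,
	"MaxUnavailableStatefulSet":              false,
	"NodeSwap":                               false,
	"ProcMountType":                          false,
	"RouteExternalCertificate":               false,
	"ServiceAccountTokenNodeBinding":         false,
	"TranslateStreamCloseWebsocketRequests":  false,
	"UserNamespacesPodSecurityStandards":     false,
	"UserNamespacesSupport":                  false,
	"ValidatingAdmissionPolicy":              true,
	"VolumeAttributesClass":                  false,
}

// enabled reports a gate's state; names that never made it into the map
// (such as the OpenShift-level gates) fall back to false in this sketch.
func enabled(name string) bool { return effective[name] }

func main() {
	fmt.Println("KMSv1 enabled:", enabled("KMSv1"))           // true
	fmt.Println("NodeSwap enabled:", enabled("NodeSwap"))     // false
	fmt.Println("GatewayAPI enabled:", enabled("GatewayAPI")) // false: not registered with the kubelet
}
```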
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.932993 4940 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933002 4940 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933010 4940 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933018 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933026 4940 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933035 4940 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933077 4940 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933087 4940 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933097 4940 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933104 4940 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933112 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933120 4940 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933129 4940 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933137 4940 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933145 4940 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933152 4940 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933160 4940 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933168 4940 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933176 4940 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933184 4940 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933191 4940 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933199 4940 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933207 4940 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933214 4940 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933223 4940 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933231 4940 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933239 4940 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933250 4940 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933260 4940 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933270 4940 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933279 4940 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933287 4940 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933295 4940 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933303 4940 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933311 4940 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933319 4940 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933327 4940 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933335 4940 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933343 4940 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933351 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933359 4940 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933367 4940 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933375 4940 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933383 4940 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933391 4940 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933399 4940 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933407 4940 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933415 4940 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 06:54:58 crc kubenswrapper[4940]: W1126 06:54:58.933423 4940 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.933435 4940 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.934726 4940 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.940998 4940 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.941190 4940 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.945568 4940 server.go:997] "Starting client certificate rotation"
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.945609 4940 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.945851 4940 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-12 01:54:08.85389068 +0000 UTC
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.946004 4940 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 378h59m9.90789364s for next certificate rotation
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.972161 4940 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.975596 4940 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 06:54:58 crc kubenswrapper[4940]: I1126 06:54:58.990417 4940 log.go:25] "Validated CRI v1 runtime API"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.033441 4940 log.go:25] "Validated CRI v1 image API"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.036711 4940 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.046759 4940 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-26-06-44-53-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.046811 4940 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.079135 4940 manager.go:217] Machine: {Timestamp:2025-11-26 06:54:59.073536278 +0000 UTC m=+0.593677977 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:54d650ff-21de-4a69-b96e-f42595cf8fe0 BootID:9820c585-3300-4e94-8fad-73afaec61623 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:66:e3:68 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:66:e3:68 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:2e:91:c0 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:c3:ee:70 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:95:13:60 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:8d:7b:15 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:73:11:99 Speed:-1 Mtu:1496} {Name:ens7.44 MacAddress:52:54:00:87:21:b2 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:72:87:38:a1:f7:87 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:32:43:c3:6d:d3:8d Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.079518 4940 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.079752 4940 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.081745 4940 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.082072 4940 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.082145 4940 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.082533 4940 topology_manager.go:138] "Creating topology manager with none policy"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.082553 4940 container_manager_linux.go:303] "Creating device plugin manager"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.083180 4940 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.083233 4940 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.083470 4940 state_mem.go:36] "Initialized new in-memory state store"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.083614 4940 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.087152 4940 kubelet.go:418] "Attempting to sync node with API server"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.087190 4940 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.087228 4940 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.087248 4940 kubelet.go:324] "Adding apiserver pod source"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.087266 4940 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.092311 4940 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.093970 4940 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.095509 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.095536 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.095644 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.095582 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.097212 4940 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.100926 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.100985 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.100999 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101012 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101064 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101078 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101091 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101116 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101132 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101147 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101192 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101206 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101246 4940 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.101925 4940 server.go:1280] "Started kubelet"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.102688 4940 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.102826 4940 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.102835 4940 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.103598 4940 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 26 06:54:59 crc systemd[1]: Started Kubernetes Kubelet.
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.105875 4940 server.go:460] "Adding debug handlers to kubelet server"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.106183 4940 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.106227 4940 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.106278 4940 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 16:10:08.07943364 +0000 UTC
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.106392 4940 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 921h15m8.9730467s for next certificate rotation
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.106826 4940 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.106844 4940 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.106891 4940 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.106973 4940 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.107628 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="200ms"
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.108142 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.108222 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.109150 4940 factory.go:55] Registering systemd factory
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.109178 4940 factory.go:221] Registration of the systemd container factory successfully
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.109558 4940 factory.go:153] Registering CRI-O factory
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.109591 4940 factory.go:221] Registration of the crio container factory successfully
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.109713 4940 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.109769 4940 factory.go:103] Registering Raw factory
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.109804 4940 manager.go:1196] Started watching for new ooms in manager
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.111280 4940 manager.go:319] Starting recovery of all containers
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.111395 4940 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.58:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b7c12c4fd68d2 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 06:54:59.101878482 +0000 UTC m=+0.622020141,LastTimestamp:2025-11-26 06:54:59.101878482 +0000 UTC m=+0.622020141,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118029 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118106 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118129 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118145 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118159 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118173 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118185 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118197 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118210 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118223 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118234 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118246 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118277 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118299 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118310 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118322 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118333 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118346 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118358 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118402 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118419 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118430 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118442 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118490 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118509 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118524 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118578 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118595 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118609 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118621 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118633 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118645 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118658 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118672 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118684 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118698 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118712 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118726 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118740 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118753 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.118766 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122740 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122769 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122801 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122819 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122845 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122876 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122895 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122921 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122943 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122962 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.122990 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123027 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123075 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123108 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123135 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123164 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123192 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123213 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123232 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123259 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123300 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123327 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123347 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123363 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123388 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123406 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123429 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123449 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123466 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123490 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123510 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123531 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123556 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123575 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123600 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123618 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123635 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123664 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123685 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123719 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123747 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123767 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123799 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123821 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123849 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123871 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123894 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123924 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123944 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123973 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.123993 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124010 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124060 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124082 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124107 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124124 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124143 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124169 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124189 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124207 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124231 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124250 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124273 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124299 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124327 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.124352 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.127329 4940 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.127402 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.127447 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.127482 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.127515 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.127592 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128101 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128191 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128420 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128460 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128474 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128490 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128501 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128512 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.128527 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129598 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129621 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129636 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129650 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129665 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129680 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129694 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129707 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129720 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129732 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129745 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129762 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129776 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129788 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129803 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129824 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod=""
podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129845 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129864 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129879 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129893 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129906 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129920 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129937 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129950 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129964 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129979 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.129992 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130004 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130016 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130029 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130062 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130075 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130089 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130133 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130149 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130164 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130176 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130188 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130200 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130213 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130244 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130258 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130271 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130310 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130325 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130339 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130355 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130400 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130416 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130430 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130444 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130457 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130477 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130492 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130505 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130521 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130536 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130549 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130562 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130575 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130618 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130633 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130648 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130662 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130677 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130691 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130707 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130721 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130738 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130752 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130765 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130781 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130796 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130810 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130825 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130839 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130853 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130867 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130883 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130895 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130909 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130921 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.130935 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.131010 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.131029 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.131069 4940 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.131084 4940 reconstruct.go:97] "Volume reconstruction finished" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.131094 4940 reconciler.go:26] "Reconciler: start to sync state" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.143199 4940 manager.go:324] Recovery completed Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.157359 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.159403 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.159451 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.159464 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.160726 4940 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.160753 4940 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.160783 4940 state_mem.go:36] "Initialized new in-memory state store" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.161845 4940 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.164176 4940 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.164228 4940 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.164263 4940 kubelet.go:2335] "Starting kubelet main sync loop" Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.164327 4940 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.168423 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.168509 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.179393 4940 policy_none.go:49] "None policy: Start" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.180306 4940 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.180353 4940 state_mem.go:35] "Initializing new in-memory state store" Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.207725 4940 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.234259 4940 manager.go:334] "Starting Device Plugin manager" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.234356 4940 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.234369 4940 server.go:79] "Starting device plugin registration server" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.234780 4940 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.234797 4940 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.235244 4940 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.235347 4940 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.235357 4940 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.240193 4940 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.265484 4940 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 06:54:59 crc kubenswrapper[4940]: 
I1126 06:54:59.265633 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.267234 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.267311 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.267336 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.267597 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.267769 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.267849 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269185 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269236 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269285 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269247 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269308 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269368 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269597 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269754 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.269790 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271209 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271255 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271274 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271357 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271390 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271404 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271418 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271588 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.271640 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.272587 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.272620 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.272628 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.272782 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.272910 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.272968 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.272995 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273002 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273107 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273409 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273457 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273486 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273759 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273826 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273901 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273927 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.273938 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.278408 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.278445 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.278486 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.308385 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="400ms" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.333712 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.333779 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.333810 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.333835 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.333859 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.333886 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.333975 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.334008 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.334028 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.334088 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.334143 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.334209 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.334243 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.334265 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.334286 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.335798 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.337151 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.337202 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.337221 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.337262 4940 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.337963 4940 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435704 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435778 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435818 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435851 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435880 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435888 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435935 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435984 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435912 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436009 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436139 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.435997 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436189 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436227 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: 
\"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436159 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436268 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436264 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436243 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436308 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436344 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436340 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436394 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436412 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436421 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436485 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436518 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436558 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436521 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436653 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.436803 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.538137 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.540326 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.540362 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.540370 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.540395 4940 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.540852 4940 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc" Nov 26 
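
The volume entries above come in pairs: reconciler_common.go logs "operationExecutor.MountVolume started" when the desired-state reconciler picks a volume up, and operation_generator.go logs "MountVolume.SetUp succeeded" once the hostPath is bound into the static pod. Every entry shares the same journal-plus-klog layout, which makes the stream easy to parse mechanically. A minimal sketch (Python 3; the regex and field names are mine, inferred from the lines above, not anything kubelet ships):

    import re

    # Assumed layout, read off the entries above:
    #   "<Mon> <d> <HH:MM:SS> <host> <proc>[pid]: <L><MMDD> <HH:MM:SS.us> <tid> <file>:<line>] <msg>"
    KLOG = re.compile(
        r'^(?P<month>\w{3}) +(?P<day>\d+) (?P<time>[\d:]+) (?P<host>\S+) '
        r'(?P<proc>[\w-]+)\[(?P<pid>\d+)\]: '
        r'(?P<level>[IWEF])(?P<mmdd>\d{4}) (?P<ts>[\d:.]+) +(?P<tid>\d+) '
        r'(?P<src>[\w./-]+:\d+)\] (?P<msg>.*)$'
    )

    def parse(line):
        """Return the klog fields as a dict, or None for continuation lines
        (e.g. the Trace[...] lines later in this log, which carry no header)."""
        m = KLOG.match(line)
        return m.groupdict() if m else None

    fields = parse('Nov 26 06:54:59 crc kubenswrapper[4940]: '
                   'I1126 06:54:59.538137 4940 kubelet_node_status.go:401] '
                   '"Setting node annotation to enable volume controller attach/detach"')
    print(fields['level'], fields['src'])   # I kubelet_node_status.go:401

The level letter (I/W/E) and the source file:line are the two fields most useful for slicing the rest of this capture.
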
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.596400 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.601994 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.626875 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.643120 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-0d99001451d51947ae6ce70c34b1af581158ad71a8faf63b1e1b260eb59eb65f WatchSource:0}: Error finding container 0d99001451d51947ae6ce70c34b1af581158ad71a8faf63b1e1b260eb59eb65f: Status 404 returned error can't find the container with id 0d99001451d51947ae6ce70c34b1af581158ad71a8faf63b1e1b260eb59eb65f
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.643736 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.645642 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-b94b2e073dbb27ee369ae801cc22c3f48f20aee3f442ea8d5146043bf0dcce40 WatchSource:0}: Error finding container b94b2e073dbb27ee369ae801cc22c3f48f20aee3f442ea8d5146043bf0dcce40: Status 404 returned error can't find the container with id b94b2e073dbb27ee369ae801cc22c3f48f20aee3f442ea8d5146043bf0dcce40
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.647844 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.651762 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-d2855d8200c63764bfdaa49944a049bbf9f6d37e9aa1107776fdcb463996ca46 WatchSource:0}: Error finding container d2855d8200c63764bfdaa49944a049bbf9f6d37e9aa1107776fdcb463996ca46: Status 404 returned error can't find the container with id d2855d8200c63764bfdaa49944a049bbf9f6d37e9aa1107776fdcb463996ca46
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.659200 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-e9dcdfef2111b46fe80d6a8ee6535a86d6a21e24163170221dbf5331916b557f WatchSource:0}: Error finding container e9dcdfef2111b46fe80d6a8ee6535a86d6a21e24163170221dbf5331916b557f: Status 404 returned error can't find the container with id e9dcdfef2111b46fe80d6a8ee6535a86d6a21e24163170221dbf5331916b557f
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.670631 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-27b3935235ae20936350b3c383c2c1a4a18458a87811e175524acfc0133c22ec WatchSource:0}: Error finding container 27b3935235ae20936350b3c383c2c1a4a18458a87811e175524acfc0133c22ec: Status 404 returned error can't find the container with id 27b3935235ae20936350b3c383c2c1a4a18458a87811e175524acfc0133c22ec
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.710085 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="800ms"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.940957 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.943080 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.943153 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.943178 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:54:59 crc kubenswrapper[4940]: I1126 06:54:59.943225 4940 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.943707 4940 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc"
Nov 26 06:54:59 crc kubenswrapper[4940]: W1126 06:54:59.999836 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Nov 26 06:54:59 crc kubenswrapper[4940]: E1126 06:54:59.999927 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError"
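
Each informer above (CSIDriver, Node, RuntimeClass, Service) fails its initial LIST independently while the apiserver at 38.102.83.58:6443 is refusing connections, so the same connection-refused text recurs once per resource kind per retry. A small tally sketch to see which informers are stuck (Python 3; only the message text actually printed above is assumed, and the file name is hypothetical):

    import re
    from collections import Counter

    # Count reflector "failed to list *v1.<Kind>" errors by kind.
    KIND = re.compile(r'failed to list \*v1\.(\w+)')

    def tally(lines):
        return Counter(m.group(1) for line in lines if (m := KIND.search(line)) is not None)

    failures = tally(open('kubelet.log'))   # hypothetical path to this capture
    # e.g. Counter({'CSIDriver': 3, 'Node': 2, 'RuntimeClass': 2, 'Service': 2})
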
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Nov 26 06:55:00 crc kubenswrapper[4940]: W1126 06:55:00.101362 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:55:00 crc kubenswrapper[4940]: E1126 06:55:00.101461 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.104172 4940 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:55:00 crc kubenswrapper[4940]: W1126 06:55:00.113908 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:55:00 crc kubenswrapper[4940]: E1126 06:55:00.114029 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Nov 26 06:55:00 crc kubenswrapper[4940]: W1126 06:55:00.135866 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:55:00 crc kubenswrapper[4940]: E1126 06:55:00.135923 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.169669 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"27b3935235ae20936350b3c383c2c1a4a18458a87811e175524acfc0133c22ec"} Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.171552 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e9dcdfef2111b46fe80d6a8ee6535a86d6a21e24163170221dbf5331916b557f"} Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.173502 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"d2855d8200c63764bfdaa49944a049bbf9f6d37e9aa1107776fdcb463996ca46"} Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.174796 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b94b2e073dbb27ee369ae801cc22c3f48f20aee3f442ea8d5146043bf0dcce40"} Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.176866 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0d99001451d51947ae6ce70c34b1af581158ad71a8faf63b1e1b260eb59eb65f"} Nov 26 06:55:00 crc kubenswrapper[4940]: E1126 06:55:00.511635 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="1.6s" Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.743983 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.745914 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.745953 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.745962 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:00 crc kubenswrapper[4940]: I1126 06:55:00.745990 4940 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 06:55:00 crc kubenswrapper[4940]: E1126 06:55:00.746527 4940 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.104274 4940 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.182274 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf" exitCode=0 Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.182393 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf"} Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.182435 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.183550 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.183591 
4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.183606 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.184287 4940 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e" exitCode=0 Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.184340 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e"} Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.184549 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.185593 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.186418 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.186467 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.186485 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.187629 4940 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf" exitCode=0 Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.187726 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf"} Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.187802 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.187821 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.187833 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.188011 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.189835 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.189865 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.189881 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.191966 4940 generic.go:334] "Generic (PLEG): 
container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332" exitCode=0 Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.192086 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332"} Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.192124 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.193415 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.193449 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.193466 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.195152 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5"} Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.195203 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6"} Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.195221 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a"} Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.195235 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701"} Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.195244 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.196315 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.196355 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:01 crc kubenswrapper[4940]: I1126 06:55:01.196372 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:01 crc kubenswrapper[4940]: W1126 06:55:01.989582 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:55:01 crc kubenswrapper[4940]: E1126 06:55:01.989996 
4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Nov 26 06:55:02 crc kubenswrapper[4940]: W1126 06:55:02.071963 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:55:02 crc kubenswrapper[4940]: E1126 06:55:02.072071 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.104092 4940 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Nov 26 06:55:02 crc kubenswrapper[4940]: E1126 06:55:02.112818 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="3.2s" Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.204101 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40"} Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.204189 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2"} Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.204225 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea"} Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.204253 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f"} Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.211560 4940 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750" exitCode=0 Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.211714 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750"} Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 
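
The "Failed to ensure lease exists, will retry" errors are worth tracking across this capture: the reported interval doubles each attempt, 800ms, then 1.6s, then 3.2s above, and 6.4s further down, i.e. exponential backoff with factor 2. A sketch that reproduces the printed schedule (Python 3; base and factor are read off the log, while any cap or jitter is not visible in this excerpt and is therefore omitted):

    from datetime import timedelta

    # Reproduce the lease-controller retry intervals printed above.
    # Assumption: pure doubling from an 800ms base, as the log shows.
    def backoff_schedule(base=timedelta(milliseconds=800), factor=2, attempts=4):
        interval = base
        for _ in range(attempts):
            yield interval
            interval *= factor

    print([f"{i.total_seconds():g}s" for i in backoff_schedule()])
    # ['0.8s', '1.6s', '3.2s', '6.4s']
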
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.211759 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.214019 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.215202 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.215286 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.216322 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"3131c0c473bf5ea3e9ba041ef10dd564f1af25aa7c97b9ea2da10e9e98f8ef17"}
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.216615 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.217979 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.218114 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.218315 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.219632 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.219830 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.220169 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc"}
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.220209 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4"}
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.220224 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d"}
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.220508 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.220533 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.220544 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.221272 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.221288 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.221298 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:02 crc kubenswrapper[4940]: W1126 06:55:02.306664 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Nov 26 06:55:02 crc kubenswrapper[4940]: E1126 06:55:02.306772 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.346645 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.347674 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.347699 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.347709 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:02 crc kubenswrapper[4940]: I1126 06:55:02.347730 4940 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 06:55:02 crc kubenswrapper[4940]: E1126 06:55:02.348078 4940 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.042908 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.226263 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143"}
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.226292 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.227734 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.227794 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.227829 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.228936 4940 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd" exitCode=0
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.229010 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd"}
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.229116 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.229133 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.229162 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.229215 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.229232 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.230321 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.230378 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.230396 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231203 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231238 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231254 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231269 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231296 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231308 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231386 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231401 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.231411 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:03 crc kubenswrapper[4940]: I1126 06:55:03.829106 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.236660 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113"}
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.236725 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0"}
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.236746 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd"}
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.236767 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6"}
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.236746 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.236804 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.236831 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.237930 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.237985 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.238018 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.237964 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.238071 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:04 crc kubenswrapper[4940]: I1126 06:55:04.238072 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.245756 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000"}
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.245903 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.247250 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.247290 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.247305 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
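
The PLEG events interleave all five static pods, which makes the per-pod story hard to follow in the raw stream: for etcd, each ContainerDied with exitCode=0 is an init container finishing, and the ContainerStarted burst above is its main containers coming up. A grouping sketch (Python 3; the regex keys off the pod= and event={...} fields exactly as printed here, and the file name is hypothetical):

    import re
    from collections import defaultdict

    # Group "SyncLoop (PLEG)" events by pod so each pod's
    # ContainerStarted/ContainerDied sequence reads in arrival order.
    EVT = re.compile(r'pod="(?P<pod>[^"]+)" event={"ID":"[^"]+",'
                     r'"Type":"(?P<type>\w+)","Data":"(?P<data>[0-9a-f]+)"}')

    def timeline(lines):
        out = defaultdict(list)
        for line in lines:
            m = EVT.search(line)
            if m:
                out[m['pod']].append((m['type'], m['data'][:12]))
        return dict(out)

    events = timeline(open('kubelet.log'))   # hypothetical path
    # events['openshift-etcd/etcd-crc'] -> Died/Started pairs per container ID
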
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.279882 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.280084 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.281395 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.281455 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.281472 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.289862 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.548977 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.550879 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.550944 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.550963 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.550998 4940 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.803820 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.804108 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.805727 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.805791 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:05 crc kubenswrapper[4940]: I1126 06:55:05.805819 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.249087 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.249202 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.250412 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.250463 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.250483 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.251181 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.251256 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.251276 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.351561 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.351873 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.353713 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.353775 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.353799 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.505687 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 26 06:55:06 crc kubenswrapper[4940]: I1126 06:55:06.815285 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.137317 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.250816 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.250866 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.250919 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252353 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252365 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252434 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252460 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252400 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252514 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252646 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252665 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:07 crc kubenswrapper[4940]: I1126 06:55:07.252673 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:09 crc kubenswrapper[4940]: E1126 06:55:09.240313 4940 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 26 06:55:10 crc kubenswrapper[4940]: I1126 06:55:10.138111 4940 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 26 06:55:10 crc kubenswrapper[4940]: I1126 06:55:10.138238 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 06:55:10 crc kubenswrapper[4940]: I1126 06:55:10.281031 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:55:10 crc kubenswrapper[4940]: I1126 06:55:10.281390 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:10 crc kubenswrapper[4940]: I1126 06:55:10.283080 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:10 crc kubenswrapper[4940]: I1126 06:55:10.283123 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:10 crc kubenswrapper[4940]: I1126 06:55:10.283138 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:10 crc kubenswrapper[4940]: I1126 06:55:10.288232 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:55:11 crc kubenswrapper[4940]: I1126 06:55:11.263206 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:55:11 crc kubenswrapper[4940]: I1126 06:55:11.264711 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:11 crc kubenswrapper[4940]: I1126 06:55:11.264797 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:11 crc kubenswrapper[4940]: I1126 06:55:11.264816 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:12 crc kubenswrapper[4940]: I1126 06:55:12.249069 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
pod="openshift-etcd/etcd-crc" Nov 26 06:55:12 crc kubenswrapper[4940]: I1126 06:55:12.249310 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:12 crc kubenswrapper[4940]: I1126 06:55:12.250605 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:12 crc kubenswrapper[4940]: I1126 06:55:12.250650 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:12 crc kubenswrapper[4940]: I1126 06:55:12.250664 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:12 crc kubenswrapper[4940]: W1126 06:55:12.963600 4940 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 26 06:55:12 crc kubenswrapper[4940]: I1126 06:55:12.963708 4940 trace.go:236] Trace[1632089179]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 06:55:02.961) (total time: 10001ms): Nov 26 06:55:12 crc kubenswrapper[4940]: Trace[1632089179]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (06:55:12.963) Nov 26 06:55:12 crc kubenswrapper[4940]: Trace[1632089179]: [10.001953876s] [10.001953876s] END Nov 26 06:55:12 crc kubenswrapper[4940]: E1126 06:55:12.963738 4940 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 26 06:55:13 crc kubenswrapper[4940]: E1126 06:55:13.050216 4940 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.187b7c12c4fd68d2 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 06:54:59.101878482 +0000 UTC m=+0.622020141,LastTimestamp:2025-11-26 06:54:59.101878482 +0000 UTC m=+0.622020141,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.104910 4940 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.157427 4940 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 06:55:13 
crc kubenswrapper[4940]: I1126 06:55:13.157532 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.165873 4940 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.165931 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.268497 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.270367 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143" exitCode=255 Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.270402 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143"} Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.270552 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.271493 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.271556 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.271568 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:13 crc kubenswrapper[4940]: I1126 06:55:13.272574 4940 scope.go:117] "RemoveContainer" containerID="1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143" Nov 26 06:55:14 crc kubenswrapper[4940]: I1126 06:55:14.275168 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 06:55:14 crc kubenswrapper[4940]: I1126 06:55:14.279184 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048"} Nov 26 06:55:14 crc kubenswrapper[4940]: I1126 06:55:14.279333 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:14 crc kubenswrapper[4940]: I1126 06:55:14.280119 4940 
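
Two failure shapes appear above. First, the kube-apiserver startup probe is rejected with HTTP 403 because the request reaches /livez as system:anonymous (per the start-of-body text), which suggests the probe path is not yet authorized for unauthenticated callers at this stage of startup. Second, a container in the kube-apiserver pod exits with code 255, apparently the check-endpoints container given the log path parsed just before, and the kubelet removes and restarts it (RemoveContainer followed by a new ContainerStarted). A sketch that pulls the "Probe failed" records into dicts (Python 3; note output= is captured only up to the first quote character, which is enough for the plain HTTP-status cases, while outputs embedding escaped quotes get truncated):

    import re

    # Extract probeType / pod / container / result / output from the
    # prober.go:107 "Probe failed" records printed above.
    PROBE = re.compile(
        r'"Probe failed" probeType="(?P<type>\w+)" pod="(?P<pod>[^"]+)" '
        r'podUID="[^"]+" containerName="(?P<container>[^"]+)" '
        r'probeResult="(?P<result>\w+)" output="(?P<output>[^"]*)"'
    )

    def probe_failures(lines):
        return [m.groupdict() for line in lines if (m := PROBE.search(line))]
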
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:14 crc kubenswrapper[4940]: I1126 06:55:14.280144 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:14 crc kubenswrapper[4940]: I1126 06:55:14.280152 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:15 crc kubenswrapper[4940]: I1126 06:55:15.804526 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:55:15 crc kubenswrapper[4940]: I1126 06:55:15.804802 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:15 crc kubenswrapper[4940]: I1126 06:55:15.806489 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:15 crc kubenswrapper[4940]: I1126 06:55:15.806561 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:15 crc kubenswrapper[4940]: I1126 06:55:15.806587 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:16 crc kubenswrapper[4940]: I1126 06:55:16.821838 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:55:16 crc kubenswrapper[4940]: I1126 06:55:16.822123 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:16 crc kubenswrapper[4940]: I1126 06:55:16.824488 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:16 crc kubenswrapper[4940]: I1126 06:55:16.824555 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:16 crc kubenswrapper[4940]: I1126 06:55:16.824569 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:16 crc kubenswrapper[4940]: I1126 06:55:16.829734 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:55:17 crc kubenswrapper[4940]: I1126 06:55:17.288842 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:17 crc kubenswrapper[4940]: I1126 06:55:17.289873 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:17 crc kubenswrapper[4940]: I1126 06:55:17.289927 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:17 crc kubenswrapper[4940]: I1126 06:55:17.289943 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:18 crc kubenswrapper[4940]: E1126 06:55:18.155733 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 26 06:55:18 crc kubenswrapper[4940]: E1126 06:55:18.159244 4940 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode 
infra config cache not synchronized" node="crc" Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.159783 4940 trace.go:236] Trace[2050211040]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 06:55:07.211) (total time: 10947ms): Nov 26 06:55:18 crc kubenswrapper[4940]: Trace[2050211040]: ---"Objects listed" error: 10947ms (06:55:18.159) Nov 26 06:55:18 crc kubenswrapper[4940]: Trace[2050211040]: [10.947980818s] [10.947980818s] END Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.159925 4940 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.160392 4940 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.160608 4940 trace.go:236] Trace[72919618]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 06:55:07.469) (total time: 10691ms): Nov 26 06:55:18 crc kubenswrapper[4940]: Trace[72919618]: ---"Objects listed" error: 10691ms (06:55:18.160) Nov 26 06:55:18 crc kubenswrapper[4940]: Trace[72919618]: [10.691209155s] [10.691209155s] END Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.160630 4940 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.161681 4940 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.485791 4940 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.975596 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:55:18 crc kubenswrapper[4940]: I1126 06:55:18.981285 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.101026 4940 apiserver.go:52] "Watching apiserver" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.106518 4940 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.106833 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.107160 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.107262 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.107290 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.107281 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.107210 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.107596 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.107715 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.107727 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.107814 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.108505 4940 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.109220 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.111685 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.111782 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.111708 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.111745 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.111770 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.112356 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.112547 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.119484 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.156818 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168278 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168339 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168361 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168386 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168406 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168425 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168444 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 
06:55:19.168462 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168495 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168513 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168532 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168552 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168600 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168622 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168642 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168664 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168733 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 
26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168758 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168781 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168803 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168828 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168850 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168872 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168896 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168917 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168943 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168965 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod 
\"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.168986 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169008 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169030 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169068 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169091 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169115 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169137 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169195 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169220 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169244 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169268 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169291 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169315 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169338 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169362 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169383 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169411 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169433 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169459 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169482 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" 
(UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169506 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169527 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169553 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169574 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169598 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169621 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169647 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169674 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169723 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169748 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169774 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169803 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169827 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169850 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169874 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169898 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169922 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169950 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169975 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170000 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170022 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170130 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170156 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170181 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170305 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170352 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170538 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170563 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170591 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170616 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170699 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170723 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170743 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170806 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170831 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170872 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170892 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170913 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170933 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170955 4940 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170977 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171001 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171159 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171187 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171210 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171233 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171256 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171278 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171302 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171325 4940 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171347 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171371 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171393 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171431 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171466 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171487 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171506 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171527 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171654 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171680 4940 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171700 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171738 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171758 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171782 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171803 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171822 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171844 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171865 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171885 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 
26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171906 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171926 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171946 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171990 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172131 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172189 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172212 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172247 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172268 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172291 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172327 4940 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172349 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172373 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172395 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172419 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172444 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172469 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172511 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172535 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172558 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 06:55:19 crc 
kubenswrapper[4940]: I1126 06:55:19.172589 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172616 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172640 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172665 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172689 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172712 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172734 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172756 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172793 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172818 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 
06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172842 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172868 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172891 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172916 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172945 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172971 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.172997 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173023 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173080 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173307 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173335 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173361 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173391 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173419 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173445 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173471 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173497 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173524 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173551 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173575 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173601 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173625 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173650 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173676 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173699 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173722 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173744 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173764 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173787 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173806 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" 
(UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173828 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173849 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173871 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173895 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173919 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173942 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173965 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173990 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174014 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174322 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174356 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174381 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174405 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174430 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174457 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174483 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174510 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174536 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174563 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" 
(UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174624 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174657 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174692 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174721 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174751 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174780 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174809 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174839 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174868 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174896 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174921 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174946 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174978 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.175005 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180244 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169466 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169622 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.169897 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170378 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170452 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170581 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170861 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.170968 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171156 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.171268 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.173631 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174188 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174486 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.174763 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.175006 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.175437 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.175503 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.175638 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.176096 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.176196 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.176392 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.176619 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.195225 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.176690 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.176873 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.177157 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.177413 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.177567 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.177673 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.177915 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.177933 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.178167 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.178759 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.178946 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.179151 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.179167 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.179470 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.179641 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.179752 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180109 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180222 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180435 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180589 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). 
InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180638 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180748 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180908 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.180969 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.181066 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.181218 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.181587 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.181741 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.181802 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.181905 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.182010 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.182596 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.183934 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.184496 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.185665 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.186016 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.186027 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.186389 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.186681 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.187287 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.187320 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.187473 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.187608 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.187622 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.192543 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.192536 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.192552 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.192853 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.192845 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.192997 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.193119 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.193303 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.193411 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.193616 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.193610 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.193708 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.196467 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.196798 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.197223 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.197271 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.197689 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.197864 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.198115 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.198406 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.198514 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.198513 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.199060 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.199294 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.199472 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.199695 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.199723 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.206556 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.206577 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.206663 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.206943 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.207141 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.207481 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.207541 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.208210 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:55:19.708188016 +0000 UTC m=+21.228329635 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.208364 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.208463 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.209435 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.209613 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.209848 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.209893 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.210026 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.210164 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.210314 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.210550 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.210821 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.210837 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.210980 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.211121 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.211144 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.211276 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.211404 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.211630 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.211666 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.211714 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.212084 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.209666 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.212527 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.212516 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213350 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213356 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213388 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213412 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213466 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213602 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213718 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213795 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213762 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213868 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.213800 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.214072 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.214172 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.214550 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.214588 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.214594 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.214802 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.214921 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.214961 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.215318 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.215372 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.215551 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.215551 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.218126 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.218254 4940 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.218307 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:19.718288195 +0000 UTC m=+21.238429814 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.218382 4940 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.218409 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:19.718402879 +0000 UTC m=+21.238544498 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.219252 4940 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.220017 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.220431 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.221543 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.221800 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.222002 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.222025 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.222280 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.222571 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.223476 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.223550 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.223837 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.224071 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.224673 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.224691 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.224795 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.225584 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.225815 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.225894 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.225910 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.220105 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.226610 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.226909 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.227392 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.228633 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.230618 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.234652 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.234812 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.235537 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.235860 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.235881 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.235893 4940 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.235957 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:19.735926764 +0000 UTC m=+21.256068383 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.236780 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.236800 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.236808 4940 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.236880 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:19.736827413 +0000 UTC m=+21.256969032 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.236908 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.236947 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.237310 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.237323 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.237870 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.238821 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.238966 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.239092 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.239193 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.239426 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.239882 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.239911 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.239924 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.245815 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.245985 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.246006 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.246208 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.246301 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.246461 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.246776 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.247510 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.247664 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.251869 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.252903 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.253054 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.265249 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.266793 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.268988 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277256 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277301 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277348 4940 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277360 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277370 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277380 4940 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277389 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277398 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277407 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277416 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277424 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277433 4940 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277442 4940 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277449 4940 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277458 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277466 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277476 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277485 4940 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277493 4940 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277503 4940 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277511 4940 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277521 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277530 4940 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277538 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277547 4940 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" 
(UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277556 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277564 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277573 4940 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277581 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277589 4940 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277602 4940 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277611 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277619 4940 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277627 4940 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277636 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277644 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277652 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277660 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277668 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.278006 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.278247 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.278474 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.277676 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279027 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279053 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279062 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279070 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279078 4940 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279087 4940 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279095 4940 reconciler_common.go:293] 
"Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279105 4940 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279113 4940 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279121 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279130 4940 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279138 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279146 4940 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279154 4940 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279162 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279171 4940 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279179 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279188 4940 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279197 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279207 4940 reconciler_common.go:293] "Volume 
detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279215 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279225 4940 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279235 4940 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279243 4940 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279251 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279261 4940 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279269 4940 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279277 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279286 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279294 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279302 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279313 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279322 4940 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279330 4940 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279337 4940 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279346 4940 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279354 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279362 4940 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279370 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279380 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279390 4940 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279398 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279406 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279414 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279422 4940 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279430 4940 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279440 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279448 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279455 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279464 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279472 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279480 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279488 4940 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279495 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279503 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279511 4940 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279522 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279531 4940 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279539 4940 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279547 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279555 4940 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279564 4940 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279572 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279582 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279590 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279598 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279607 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279615 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279625 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279633 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279641 4940 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279649 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279659 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279667 4940 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279675 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279682 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279690 4940 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279698 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279706 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279715 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279723 4940 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279731 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279738 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279746 4940 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279753 4940 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: 
\"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279761 4940 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279768 4940 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279776 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279784 4940 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279793 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279801 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279809 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279816 4940 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279824 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279832 4940 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279840 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279847 4940 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279855 4940 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279863 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279871 4940 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279880 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279888 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279896 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279904 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279914 4940 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279922 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279930 4940 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279939 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279946 4940 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279954 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279963 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: 
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279970 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279978 4940 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279986 4940 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.279994 4940 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280002 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280011 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280019 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280027 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280048 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280057 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280064 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280073 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280081 4940 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280088 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280096 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280104 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280113 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280123 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280132 4940 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280141 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280151 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280159 4940 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280168 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280175 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280183 4940 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280193 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" 
(UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280202 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280210 4940 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280219 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280227 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280236 4940 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280245 4940 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280255 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280264 4940 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280273 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280281 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280292 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280300 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 
06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280309 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280317 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280326 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280334 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280342 4940 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.280350 4940 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.283342 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.298222 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.298828 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.304135 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048" exitCode=255 Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.304914 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048"} Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.304980 4940 scope.go:117] "RemoveContainer" containerID="1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.306861 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.313698 4940 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.337079 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.348002 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.358680 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.371908 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.381496 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.393014 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"stat
e\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.421607 4940 scope.go:117] "RemoveContainer" containerID="f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.421809 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.423348 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.425146 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.433172 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.442178 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.444325 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: W1126 06:55:19.449186 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-a5cc320760d74b73c3400d41e572d1e50da1b59732e68609576cbe67905ab624 WatchSource:0}: Error finding container a5cc320760d74b73c3400d41e572d1e50da1b59732e68609576cbe67905ab624: Status 404 returned error can't find the container with id a5cc320760d74b73c3400d41e572d1e50da1b59732e68609576cbe67905ab624 Nov 26 06:55:19 crc kubenswrapper[4940]: W1126 06:55:19.459294 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-6e3471f071097b2c0db71b8eb0e18f2ef728c541aa015ccf9418b4993ef4964c WatchSource:0}: Error finding container 6e3471f071097b2c0db71b8eb0e18f2ef728c541aa015ccf9418b4993ef4964c: Status 404 returned error can't find the container with id 6e3471f071097b2c0db71b8eb0e18f2ef728c541aa015ccf9418b4993ef4964c Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.466151 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.490144 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.562765 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-kfhtm"] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.563076 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-kfhtm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.565516 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.565765 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.565917 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.576911 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:12Z\\\",\\\"message\\\":\\\"W1126 06:55:02.418263 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1126 
06:55:02.418626 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764140102 cert, and key in /tmp/serving-cert-3104727086/serving-signer.crt, /tmp/serving-cert-3104727086/serving-signer.key\\\\nI1126 06:55:02.647922 1 observer_polling.go:159] Starting file observer\\\\nW1126 06:55:02.652479 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 06:55:02.652659 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:02.654086 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3104727086/tls.crt::/tmp/serving-cert-3104727086/tls.key\\\\\\\"\\\\nF1126 06:55:12.893154 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 
06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.586229 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.599960 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.613367 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.618904 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.628683 4940 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.638555 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.650327 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.661086 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\
"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.684117 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5a323006-557b-442b-82ce-595f2f77b1f2-hosts-file\") pod \"node-resolver-kfhtm\" (UID: \"5a323006-557b-442b-82ce-595f2f77b1f2\") " pod="openshift-dns/node-resolver-kfhtm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.684162 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jblj\" (UniqueName: \"kubernetes.io/projected/5a323006-557b-442b-82ce-595f2f77b1f2-kube-api-access-5jblj\") pod \"node-resolver-kfhtm\" (UID: \"5a323006-557b-442b-82ce-595f2f77b1f2\") " pod="openshift-dns/node-resolver-kfhtm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.784528 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:19 crc kubenswrapper[4940]: 
I1126 06:55:19.784600 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.784624 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5a323006-557b-442b-82ce-595f2f77b1f2-hosts-file\") pod \"node-resolver-kfhtm\" (UID: \"5a323006-557b-442b-82ce-595f2f77b1f2\") " pod="openshift-dns/node-resolver-kfhtm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.784648 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.784674 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.784690 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jblj\" (UniqueName: \"kubernetes.io/projected/5a323006-557b-442b-82ce-595f2f77b1f2-kube-api-access-5jblj\") pod \"node-resolver-kfhtm\" (UID: \"5a323006-557b-442b-82ce-595f2f77b1f2\") " pod="openshift-dns/node-resolver-kfhtm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.784714 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.784735 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5a323006-557b-442b-82ce-595f2f77b1f2-hosts-file\") pod \"node-resolver-kfhtm\" (UID: \"5a323006-557b-442b-82ce-595f2f77b1f2\") " pod="openshift-dns/node-resolver-kfhtm" Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784755 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:55:20.784723727 +0000 UTC m=+22.304865356 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784783 4940 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784800 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784829 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784833 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:20.78481939 +0000 UTC m=+22.304961009 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784843 4940 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784848 4940 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784882 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:20.784872991 +0000 UTC m=+22.305014610 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784804 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784902 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:20.784895472 +0000 UTC m=+22.305037091 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784931 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.784950 4940 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:19 crc kubenswrapper[4940]: E1126 06:55:19.785014 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:20.785000465 +0000 UTC m=+22.305142304 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.803395 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jblj\" (UniqueName: \"kubernetes.io/projected/5a323006-557b-442b-82ce-595f2f77b1f2-kube-api-access-5jblj\") pod \"node-resolver-kfhtm\" (UID: \"5a323006-557b-442b-82ce-595f2f77b1f2\") " pod="openshift-dns/node-resolver-kfhtm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.878261 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-kfhtm" Nov 26 06:55:19 crc kubenswrapper[4940]: W1126 06:55:19.892259 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a323006_557b_442b_82ce_595f2f77b1f2.slice/crio-c5470ee2e6d54ecd6fa6416f7166685f4c337a58d1c4fa577086a4eadd043789 WatchSource:0}: Error finding container c5470ee2e6d54ecd6fa6416f7166685f4c337a58d1c4fa577086a4eadd043789: Status 404 returned error can't find the container with id c5470ee2e6d54ecd6fa6416f7166685f4c337a58d1c4fa577086a4eadd043789 Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.932662 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-kbfvm"] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.933071 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-x5j9z"] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.933256 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.937080 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.937209 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.938272 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lj789"] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.938422 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.939019 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.939062 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.941278 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.941835 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.942395 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.942470 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.942685 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.947761 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:12Z\\\",\\\"message\\\":\\\"W1126 06:55:02.418263 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1126 
06:55:02.418626 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764140102 cert, and key in /tmp/serving-cert-3104727086/serving-signer.crt, /tmp/serving-cert-3104727086/serving-signer.key\\\\nI1126 06:55:02.647922 1 observer_polling.go:159] Starting file observer\\\\nW1126 06:55:02.652479 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 06:55:02.652659 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:02.654086 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3104727086/tls.crt::/tmp/serving-cert-3104727086/tls.key\\\\\\\"\\\\nF1126 06:55:12.893154 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 
06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.948317 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.948771 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-h7pkm"] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.948968 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-gnvm5"] Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.949102 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.949208 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-h7pkm" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.949689 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-gnvm5" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.951120 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.951739 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.953006 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.953227 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.953391 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.953548 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.953781 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.953971 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.954228 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.954375 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.954741 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.962491 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.963058 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.963908 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.973452 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:19 crc kubenswrapper[4940]: I1126 06:55:19.991368 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.000735 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.009146 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.012963 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.022961 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.031651 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.040488 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.051928 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.067177 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:12Z\\\",\\\"message\\\":\\\"W1126 06:55:02.418263 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1126 
06:55:02.418626 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764140102 cert, and key in /tmp/serving-cert-3104727086/serving-signer.crt, /tmp/serving-cert-3104727086/serving-signer.key\\\\nI1126 06:55:02.647922 1 observer_polling.go:159] Starting file observer\\\\nW1126 06:55:02.652479 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 06:55:02.652659 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:02.654086 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3104727086/tls.crt::/tmp/serving-cert-3104727086/tls.key\\\\\\\"\\\\nF1126 06:55:12.893154 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 
06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.074838 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.085437 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086273 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ea5d7d28-22c3-4381-8206-f4853eca74cd-host\") 
pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086309 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-mcd-auth-proxy-config\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086327 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k2wx\" (UniqueName: \"kubernetes.io/projected/70f0d793-d867-4295-a64f-bfbcb7ad8322-kube-api-access-2k2wx\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086350 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-log-socket\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086367 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086392 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8kbn\" (UniqueName: \"kubernetes.io/projected/ea5d7d28-22c3-4381-8206-f4853eca74cd-kube-api-access-g8kbn\") pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086457 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-cni-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086500 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-conf-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086522 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-node-log\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086573 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/70f0d793-d867-4295-a64f-bfbcb7ad8322-cni-binary-copy\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086623 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-os-release\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086643 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-cni-binary-copy\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086662 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-k8s-cni-cncf-io\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086680 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-cnibin\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086699 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-kubelet\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086735 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-var-lib-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086755 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-script-lib\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086773 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovn-node-metrics-cert\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 
26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086797 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-system-cni-dir\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086813 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-os-release\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086843 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-tuning-conf-dir\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.086951 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-socket-dir-parent\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087008 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-config\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087060 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/70f0d793-d867-4295-a64f-bfbcb7ad8322-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087084 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-multus-certs\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087114 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-slash\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087148 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-ovn\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087228 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ea5d7d28-22c3-4381-8206-f4853eca74cd-serviceca\") pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087282 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nptlg\" (UniqueName: \"kubernetes.io/projected/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-kube-api-access-nptlg\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087325 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-kubelet\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087361 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-cni-bin\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087378 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-netd\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087393 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-env-overrides\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.087482 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-hostroot\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.088946 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5299\" (UniqueName: \"kubernetes.io/projected/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-kube-api-access-m5299\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089004 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-proxy-tls\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089030 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-etc-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089108 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-netns\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089162 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-systemd-units\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089200 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-netns\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089231 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-systemd\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089254 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-bin\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089295 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-cnibin\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089319 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-cni-multus\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089343 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-lmgxv\" (UniqueName: \"kubernetes.io/projected/69c1adb3-d9e7-4302-89d2-60745597f2cc-kube-api-access-lmgxv\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089372 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089402 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-ovn-kubernetes\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089458 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-daemon-config\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089505 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-etc-kubernetes\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089537 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-rootfs\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.089571 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-system-cni-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.102644 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.115889 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.128573 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.140121 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.153820 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.164474 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.164610 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.166086 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.174729 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.184603 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"na
me\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190200 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-system-cni-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190258 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-daemon-config\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190289 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-etc-kubernetes\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190321 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-rootfs\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190331 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-system-cni-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5" Nov 26 06:55:20 crc kubenswrapper[4940]: 
I1126 06:55:20.190354 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ea5d7d28-22c3-4381-8206-f4853eca74cd-host\") pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190382 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-mcd-auth-proxy-config\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k2wx\" (UniqueName: \"kubernetes.io/projected/70f0d793-d867-4295-a64f-bfbcb7ad8322-kube-api-access-2k2wx\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190446 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-cni-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190469 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-conf-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190470 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-etc-kubernetes\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190501 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-log-socket\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190538 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190484 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ea5d7d28-22c3-4381-8206-f4853eca74cd-host\") pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190558 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-cni-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190412 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-rootfs\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190603 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-conf-dir\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190606 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8kbn\" (UniqueName: \"kubernetes.io/projected/ea5d7d28-22c3-4381-8206-f4853eca74cd-kube-api-access-g8kbn\") pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190633 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-log-socket\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190662 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-cni-binary-copy\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190690 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-k8s-cni-cncf-io\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190714 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-node-log\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190664 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190741 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/70f0d793-d867-4295-a64f-bfbcb7ad8322-cni-binary-copy\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190787 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-k8s-cni-cncf-io\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190815 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-os-release\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190828 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-node-log\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190878 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-var-lib-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190838 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-var-lib-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190938 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-script-lib\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.190975 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-cnibin\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191015 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-kubelet\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191024 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-os-release\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191075 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-socket-dir-parent\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191116 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-config\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191152 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovn-node-metrics-cert\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191192 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-system-cni-dir\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191230 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-os-release\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191262 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-tuning-conf-dir\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191275 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-daemon-config\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191083 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-cnibin\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191301 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/70f0d793-d867-4295-a64f-bfbcb7ad8322-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191349 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-multus-certs\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191372 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-slash\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191395 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-ovn\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191415 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-mcd-auth-proxy-config\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191423 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ea5d7d28-22c3-4381-8206-f4853eca74cd-serviceca\") pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191480 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-slash\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191488 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nptlg\" (UniqueName: \"kubernetes.io/projected/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-kube-api-access-nptlg\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191508 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-multus-certs\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191161 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-multus-socket-dir-parent\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191112 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-kubelet\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191545 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-kubelet\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191580 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-env-overrides\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191609 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-cni-bin\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191638 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-netd\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191668 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-hostroot\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191737 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-netns\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191768 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5299\" (UniqueName: \"kubernetes.io/projected/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-kube-api-access-m5299\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191796 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-proxy-tls\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191827 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-etc-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191863 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-cnibin\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191891 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-cni-multus\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191918 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-systemd-units\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191950 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-netns\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191982 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-systemd\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191989 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-script-lib\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192011 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-bin\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192082 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmgxv\" (UniqueName: \"kubernetes.io/projected/69c1adb3-d9e7-4302-89d2-60745597f2cc-kube-api-access-lmgxv\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192109 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-system-cni-dir\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192146 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-cni-bin\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192121 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192198 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-os-release\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192158 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192213 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-ovn-kubernetes\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192240 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-ovn-kubernetes\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192259 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/70f0d793-d867-4295-a64f-bfbcb7ad8322-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.191453 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/70f0d793-d867-4295-a64f-bfbcb7ad8322-cni-binary-copy\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192336 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-netns\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192346 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-bin\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192373 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-systemd\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192380 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-kubelet\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192382 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-cnibin\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192085 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-run-netns\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192416 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-etc-openvswitch\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192446 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-host-var-lib-cni-multus\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192485 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-hostroot\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192498 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-ovn\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192536 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-netd\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192579 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-systemd-units\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192600 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ea5d7d28-22c3-4381-8206-f4853eca74cd-serviceca\") pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192607 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-cni-binary-copy\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192699 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-config\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.192857 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-env-overrides\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.193131 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/70f0d793-d867-4295-a64f-bfbcb7ad8322-tuning-conf-dir\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.197651 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-proxy-tls\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.197811 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovn-node-metrics-cert\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.207494 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.213952 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k2wx\" (UniqueName: \"kubernetes.io/projected/70f0d793-d867-4295-a64f-bfbcb7ad8322-kube-api-access-2k2wx\") pod \"multus-additional-cni-plugins-x5j9z\" (UID: \"70f0d793-d867-4295-a64f-bfbcb7ad8322\") " pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.218164 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nptlg\" (UniqueName: \"kubernetes.io/projected/1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd-kube-api-access-nptlg\") pod \"machine-config-daemon-kbfvm\" (UID: \"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\") " pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.219494 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmgxv\" (UniqueName: \"kubernetes.io/projected/69c1adb3-d9e7-4302-89d2-60745597f2cc-kube-api-access-lmgxv\") pod \"ovnkube-node-lj789\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") " pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.227757 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5299\" (UniqueName: \"kubernetes.io/projected/c9ec0fa1-713d-4824-9a3a-a20eff8c65e0-kube-api-access-m5299\") pod \"multus-gnvm5\" (UID: \"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\") " pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.229414 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.229684 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8kbn\" (UniqueName: \"kubernetes.io/projected/ea5d7d28-22c3-4381-8206-f4853eca74cd-kube-api-access-g8kbn\") pod \"node-ca-h7pkm\" (UID: \"ea5d7d28-22c3-4381-8206-f4853eca74cd\") " pod="openshift-image-registry/node-ca-h7pkm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.246939 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.248716 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.259518 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-x5j9z"
Nov 26 06:55:20 crc kubenswrapper[4940]: W1126 06:55:20.274049 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70f0d793_d867_4295_a64f_bfbcb7ad8322.slice/crio-de25f806aea5e93beec668146212e692f8ef277e866828526f61c15b40591068 WatchSource:0}: Error finding container de25f806aea5e93beec668146212e692f8ef277e866828526f61c15b40591068: Status 404 returned error can't find the container with id de25f806aea5e93beec668146212e692f8ef277e866828526f61c15b40591068
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.274546 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.281957 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-h7pkm"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.291855 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-gnvm5"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.312742 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerStarted","Data":"de25f806aea5e93beec668146212e692f8ef277e866828526f61c15b40591068"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.313833 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"d0a0ea25298facf0b98074c4b30ca1e41c0fb412b1607f3a244a380156378a73"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.319980 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kfhtm" event={"ID":"5a323006-557b-442b-82ce-595f2f77b1f2","Type":"ContainerStarted","Data":"444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.320069 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kfhtm" event={"ID":"5a323006-557b-442b-82ce-595f2f77b1f2","Type":"ContainerStarted","Data":"c5470ee2e6d54ecd6fa6416f7166685f4c337a58d1c4fa577086a4eadd043789"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.323513 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.323560 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"a5cc320760d74b73c3400d41e572d1e50da1b59732e68609576cbe67905ab624"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.324416 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"5545c6b07e356addc8a0191918268101fa01660694a2cb0f5f7f779ea23dcd03"}
Nov 26 06:55:20 crc kubenswrapper[4940]: W1126 06:55:20.326642 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9ec0fa1_713d_4824_9a3a_a20eff8c65e0.slice/crio-673a5bc194dc0eb9b542001f511cc2b003bf5cd88d03131f62248106d4171354 WatchSource:0}: Error finding container 673a5bc194dc0eb9b542001f511cc2b003bf5cd88d03131f62248106d4171354: Status 404 returned error can't find the container with id 673a5bc194dc0eb9b542001f511cc2b003bf5cd88d03131f62248106d4171354
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.328031 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.328086 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.328096 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6e3471f071097b2c0db71b8eb0e18f2ef728c541aa015ccf9418b4993ef4964c"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.331086 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"8fb7d0c6c4f1808ce07e2669cbd34742256b138fd305df72cb933f7e8d151767"}
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.331906 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.333802 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.340726 4940 scope.go:117] "RemoveContainer" containerID="f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048"
Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.340899 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.346811 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.371022 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.384134 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.401160 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1948fd7a1254b69380ec50ecd924b3256267ede449d88d148663fc3a43aa3143\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:12Z\\\",\\\"message\\\":\\\"W1126 06:55:02.418263 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1126 06:55:02.418626 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764140102 cert, and key in /tmp/serving-cert-3104727086/serving-signer.crt, /tmp/serving-cert-3104727086/serving-signer.key\\\\nI1126 06:55:02.647922 1 observer_polling.go:159] Starting file observer\\\\nW1126 06:55:02.652479 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 06:55:02.652659 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:02.654086 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3104727086/tls.crt::/tmp/serving-cert-3104727086/tls.key\\\\\\\"\\\\nF1126 06:55:12.893154 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for 
RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"star
tTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.422440 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.445172 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.458906 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.474738 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.493265 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.539874 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.570119 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.612275 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.662082 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.693975 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.750996 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.773472 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.803875 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.803957 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.803990 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.804009 4940 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.804025 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804075 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:55:22.804033312 +0000 UTC m=+24.324174931 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804131 4940 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804173 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:22.804161206 +0000 UTC m=+24.324302825 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804197 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804225 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804239 4940 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804258 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804269 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804280 4940 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804207 4940 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804271 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:22.804264179 +0000 UTC m=+24.324405798 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804316 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:22.80430979 +0000 UTC m=+24.324451409 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:20 crc kubenswrapper[4940]: E1126 06:55:20.804328 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:22.804322031 +0000 UTC m=+24.324463640 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.812880 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":
{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.853638 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.891861 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.934359 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:20 crc kubenswrapper[4940]: I1126 06:55:20.975584 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\
":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:20Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.012207 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.051891 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.097013 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.133097 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.165251 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:21 crc kubenswrapper[4940]: E1126 06:55:21.165404 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.165251 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:21 crc kubenswrapper[4940]: E1126 06:55:21.165497 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.169856 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.170788 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.171734 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.172586 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.173394 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.174061 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.174859 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.175663 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.176548 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.177245 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.177914 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.178913 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.179599 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.182174 4940 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.183225 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.183971 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.185161 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.186083 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.186638 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 
06:55:21.187880 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.188766 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.189413 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.190697 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.191299 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.192727 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.193290 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.194682 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.195619 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.196258 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.197596 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.198222 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.199306 4940 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.199455 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 
06:55:21.201611 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.202850 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.203427 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.205533 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.206825 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.207583 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.208895 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.209848 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.210507 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.211818 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.212902 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.213151 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.213925 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.215117 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.215826 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.216993 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.217939 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.219192 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.219763 4940 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.220224 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.221114 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.221666 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.222500 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.348687 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-h7pkm" event={"ID":"ea5d7d28-22c3-4381-8206-f4853eca74cd","Type":"ContainerStarted","Data":"b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200"} Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.348736 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-h7pkm" event={"ID":"ea5d7d28-22c3-4381-8206-f4853eca74cd","Type":"ContainerStarted","Data":"a1cb8e8c8eb20074d72a75b47079a909f970e768fb8404a0635df6bf2db1ef50"} Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.350951 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71" exitCode=0 Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.351021 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"} Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.353692 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4"} Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.353725 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb"} Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.355375 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gnvm5" event={"ID":"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0","Type":"ContainerStarted","Data":"4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb"} Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.355435 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gnvm5" 
event={"ID":"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0","Type":"ContainerStarted","Data":"673a5bc194dc0eb9b542001f511cc2b003bf5cd88d03131f62248106d4171354"} Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.357306 4940 generic.go:334] "Generic (PLEG): container finished" podID="70f0d793-d867-4295-a64f-bfbcb7ad8322" containerID="29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9" exitCode=0 Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.357346 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerDied","Data":"29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9"} Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.358396 4940 scope.go:117] "RemoveContainer" containerID="f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048" Nov 26 06:55:21 crc kubenswrapper[4940]: E1126 06:55:21.358549 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.371239 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.388297 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.410317 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.427205 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.451614 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.471170 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.497254 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.531529 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.573230 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.613261 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.650078 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.697188 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.736076 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.775869 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.842356 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.864881 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.891234 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.930724 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:21 crc kubenswrapper[4940]: I1126 06:55:21.973750 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:21Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.013302 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.055735 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.097730 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.134478 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.164458 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.164594 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.178582 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 
06:55:22.213378 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.256350 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.279298 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.290356 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.292561 4940 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.313743 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.349492 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.361213 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerStarted","Data":"36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98"} Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.364287 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"} Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.364418 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"} Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 
06:55:22.364501 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"} Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.364657 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"} Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.364722 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"} Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.364778 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"} Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.365111 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe"} Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.392769 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.409218 4940 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"etcd-crc\" already exists" pod="openshift-etcd/etcd-crc" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.449275 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.493852 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.538951 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.575110 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.616309 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.658349 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.691139 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.731675 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.776316 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.809241 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.826104 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.826187 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.826213 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826259 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:55:26.826238712 +0000 UTC m=+28.346380341 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826304 4940 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.826313 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826339 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:26.826331425 +0000 UTC m=+28.346473044 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826342 4940 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.826363 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826405 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:26.826386497 +0000 UTC m=+28.346528116 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826464 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826467 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826483 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826490 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826498 4940 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826503 4940 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826547 4940 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:26.826532481 +0000 UTC m=+28.346674360 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:22 crc kubenswrapper[4940]: E1126 06:55:22.826599 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:26.826568872 +0000 UTC m=+28.346710721 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.852492 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.890501 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.931090 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:22 crc kubenswrapper[4940]: I1126 06:55:22.972071 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:22Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.013601 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.051801 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.091754 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.132380 4940 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189e
f2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.165173 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:23 crc kubenswrapper[4940]: E1126 06:55:23.165318 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.165407 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:23 crc kubenswrapper[4940]: E1126 06:55:23.165480 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.175176 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.210999 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.256105 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\
\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\
\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.304773 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.330522 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.368818 4940 generic.go:334] "Generic (PLEG): container finished" podID="70f0d793-d867-4295-a64f-bfbcb7ad8322" containerID="36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98" exitCode=0 Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.368894 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerDied","Data":"36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98"} Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.370430 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.427993 4940 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.454891 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.491914 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.529852 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.580378 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.613458 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.656718 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.698323 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"start
edAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50
c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.738101 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.779952 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.809695 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.852007 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.889994 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.930427 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:23 crc kubenswrapper[4940]: I1126 06:55:23.971912 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:23Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.017414 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.052727 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.091650 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44
bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.129141 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.165431 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:24 crc kubenswrapper[4940]: E1126 06:55:24.165547 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.174428 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.375557 4940 generic.go:334] "Generic (PLEG): container finished" podID="70f0d793-d867-4295-a64f-bfbcb7ad8322" containerID="47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295" exitCode=0 Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.375613 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerDied","Data":"47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295"} Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.407893 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.425292 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.437641 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.453979 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.465416 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.481289 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.497254 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.513863 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.532757 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.560190 4940 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.561964 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.561997 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.562008 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.562143 4940 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.576336 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.624274 4940 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.624530 4940 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.626278 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.626317 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.626328 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.626346 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 
06:55:24.626356 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:24Z","lastTransitionTime":"2025-11-26T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:24 crc kubenswrapper[4940]: E1126 06:55:24.643109 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.647900 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.647943 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.647957 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.647976 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.647988 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:24Z","lastTransitionTime":"2025-11-26T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.655437 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc 
kubenswrapper[4940]: E1126 06:55:24.661632 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.665638 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.665670 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:24 
crc kubenswrapper[4940]: I1126 06:55:24.665682 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.665698 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.665709 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:24Z","lastTransitionTime":"2025-11-26T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:24 crc kubenswrapper[4940]: E1126 06:55:24.682430 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.687356 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.687394 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.687404 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.687419 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.687431 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:24Z","lastTransitionTime":"2025-11-26T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.700649 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: E1126 06:55:24.706759 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.712663 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.713066 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.713226 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.714214 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.714264 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:24Z","lastTransitionTime":"2025-11-26T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:24 crc kubenswrapper[4940]: E1126 06:55:24.731557 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: E1126 06:55:24.731723 4940 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.733555 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.733592 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.733607 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.733626 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.733639 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:24Z","lastTransitionTime":"2025-11-26T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.738410 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.774963 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.813158 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.836026 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.836117 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.836130 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.836153 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.836165 4940 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:24Z","lastTransitionTime":"2025-11-26T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.939012 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.939074 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.939087 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.939105 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:24 crc kubenswrapper[4940]: I1126 06:55:24.939116 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:24Z","lastTransitionTime":"2025-11-26T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.042518 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.042582 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.042599 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.042623 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.042641 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.144999 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.145081 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.145094 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.145114 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.145132 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.165139 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.165198 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:25 crc kubenswrapper[4940]: E1126 06:55:25.165342 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:25 crc kubenswrapper[4940]: E1126 06:55:25.165619 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.248337 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.248397 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.248415 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.248441 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.248459 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.351513 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.351576 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.351596 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.351627 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.351644 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.384315 4940 generic.go:334] "Generic (PLEG): container finished" podID="70f0d793-d867-4295-a64f-bfbcb7ad8322" containerID="780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925" exitCode=0 Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.384375 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerDied","Data":"780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.392123 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.407540 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\
\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.431169 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.449949 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.459382 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.459444 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.459456 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.459528 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.459545 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.464857 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.481027 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.492819 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.514782 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.547417 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.562394 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.562454 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.562469 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.562488 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.562503 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.566810 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-
cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.586236 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.619597 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.638584 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.658285 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.665964 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.666024 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.666052 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.666071 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.666085 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.677407 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.691002 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:25Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.768748 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.768815 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.768838 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.768866 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.768890 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.873014 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.873088 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.873100 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.873118 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.873130 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.976556 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.976639 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.976664 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.976691 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:25 crc kubenswrapper[4940]: I1126 06:55:25.976710 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:25Z","lastTransitionTime":"2025-11-26T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.079739 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.079794 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.079811 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.079830 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.079846 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.165179 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.165322 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.182335 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.182394 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.182413 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.182437 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.182455 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.285852 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.285920 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.285938 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.285966 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.285987 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.388489 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.388572 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.388594 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.388654 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.388677 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.398798 4940 generic.go:334] "Generic (PLEG): container finished" podID="70f0d793-d867-4295-a64f-bfbcb7ad8322" containerID="2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26" exitCode=0
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.398856 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerDied","Data":"2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26"}
Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.422939 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.446538 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.464094 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.477907 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.490706 4940 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.490764 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.490779 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.490804 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.490820 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.491125 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.500329 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.515804 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.535805 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.550270 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.564120 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.585681 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.592884 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.592909 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.592917 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.592931 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.592939 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.598195 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.613406 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.628013 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.637465 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:26Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.696221 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.696285 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.696304 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.696328 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.696348 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.800374 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.800971 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.800989 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.801014 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.801031 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.867108 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.867195 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.867218 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.867234 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867250 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:55:34.867231551 +0000 UTC m=+36.387373170 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.867274 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867340 4940 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867369 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867380 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867384 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:34.867376076 +0000 UTC m=+36.387517695 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867391 4940 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867415 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:34.867409087 +0000 UTC m=+36.387550706 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867619 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867670 4940 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867677 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867699 4940 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867712 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:34.867704367 +0000 UTC m=+36.387845986 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:26 crc kubenswrapper[4940]: E1126 06:55:26.867797 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:34.867764988 +0000 UTC m=+36.387906807 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.903445 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.903502 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.903522 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.903548 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:26 crc kubenswrapper[4940]: I1126 06:55:26.903567 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:26Z","lastTransitionTime":"2025-11-26T06:55:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.005896 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.005936 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.005948 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.005966 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.005979 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.108198 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.108238 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.108248 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.108263 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.108273 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.164891 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.164891 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:27 crc kubenswrapper[4940]: E1126 06:55:27.165178 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:27 crc kubenswrapper[4940]: E1126 06:55:27.165028 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.210602 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.210642 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.210677 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.210690 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.210700 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.313571 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.313619 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.313632 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.313650 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.313664 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.408381 4940 generic.go:334] "Generic (PLEG): container finished" podID="70f0d793-d867-4295-a64f-bfbcb7ad8322" containerID="481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146" exitCode=0 Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.408454 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerDied","Data":"481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.418286 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.418447 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.418465 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.418485 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.418498 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.420403 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.421443 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.421512 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.431931 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.454464 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.461800 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.462327 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.471393 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.484850 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.504290 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.515943 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.521127 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.521180 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.521194 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.521215 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.521278 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.530486 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.540722 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.559726 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bc
b43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.572243 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.583524 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.613208 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.624586 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.624631 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.624654 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.624674 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.624689 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.625175 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.644639 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.656403 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.669603 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.684742 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.697212 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.705821 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.724533 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.727177 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.727223 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.727236 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.727270 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.727284 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.736162 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.747134 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.758395 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.769874 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.782023 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.794621 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.808519 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.829726 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.829775 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc 
kubenswrapper[4940]: I1126 06:55:27.829786 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.829802 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.829812 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.836515 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d24
0cb010b64556f30473b546f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.848964 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.861720 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:27Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.931801 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.931852 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.931861 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.931875 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 
26 06:55:27 crc kubenswrapper[4940]: I1126 06:55:27.931884 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:27Z","lastTransitionTime":"2025-11-26T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.034679 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.034713 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.034721 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.034734 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.034743 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.137721 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.137765 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.137777 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.137794 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.137807 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.165154 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:28 crc kubenswrapper[4940]: E1126 06:55:28.165297 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.242264 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.242328 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.242346 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.242370 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.242388 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.345105 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.345175 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.345199 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.345229 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.345255 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.430812 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" event={"ID":"70f0d793-d867-4295-a64f-bfbcb7ad8322","Type":"ContainerStarted","Data":"7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.430909 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.448274 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.448351 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.448375 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.448407 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.448431 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.465667 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.485024 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.505493 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.528088 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.546789 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.551188 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.551266 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.551292 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.551325 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.551351 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.567024 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220
d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.585973 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.613562 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.633248 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.653706 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.653744 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.653754 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.653772 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.653784 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.654353 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.666024 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.688660 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.708985 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.727407 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.746415 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:28Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.756728 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.756777 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.756794 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.756819 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.756834 4940 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.860388 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.860442 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.860453 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.860473 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.860485 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.964066 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.964104 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.964115 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.964131 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:28 crc kubenswrapper[4940]: I1126 06:55:28.964143 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:28Z","lastTransitionTime":"2025-11-26T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.066417 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.066466 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.066476 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.066498 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.066511 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.165452 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:29 crc kubenswrapper[4940]: E1126 06:55:29.165621 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.165692 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:29 crc kubenswrapper[4940]: E1126 06:55:29.165926 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.169245 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.169276 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.169288 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.169303 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.169314 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.182923 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.197536 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.218744 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.237103 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.249831 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.264003 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.271157 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.271184 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.271195 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.271213 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.271225 4940 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.282476 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.301855 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.320816 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.338807 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.348484 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.360445 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.370679 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.374277 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.374304 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.374313 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.374333 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.374343 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.387136 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.399314 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:29Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.436141 4940 prober_manager.go:312] "Failed to 
trigger a manual run" probe="Readiness" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.478277 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.478326 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.478357 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.478376 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.478389 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.581507 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.581575 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.581592 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.581624 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.581642 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.684712 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.684766 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.684778 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.684798 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.684808 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.789162 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.789222 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.789239 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.789263 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.789280 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.892551 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.892588 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.892597 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.892611 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.892620 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.995340 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.995424 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.995455 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.995488 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:29 crc kubenswrapper[4940]: I1126 06:55:29.995512 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:29Z","lastTransitionTime":"2025-11-26T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.098574 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.098621 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.098633 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.098650 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.098662 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.164924 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:30 crc kubenswrapper[4940]: E1126 06:55:30.165131 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.201165 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.201233 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.201256 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.201283 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.201305 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.304224 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.304276 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.304294 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.304317 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.304334 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.407423 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.407478 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.407492 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.407512 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.407526 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.442201 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/0.log" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.446247 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5" exitCode=1 Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.446318 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.447424 4940 scope.go:117] "RemoveContainer" containerID="f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.461998 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.483561 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.498799 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.510095 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.510141 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.510153 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.510176 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.510193 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.514658 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.527813 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.539731 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.553155 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.566648 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.581240 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.595211 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.607148 4940 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.616720 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.616971 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.616997 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.617022 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.617067 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.627410 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.638476 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.655212 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.677922 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:29Z\\\",\\\"message\\\":\\\" reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725324 6245 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725759 6245 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:29.725820 6245 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:29.725830 6245 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:29.725849 6245 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:29.725887 6245 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:29.725892 6245 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:29.725913 6245 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:29.725919 6245 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:29.725929 6245 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:29.726005 6245 factory.go:656] Stopping watch factory\\\\nI1126 06:55:29.726026 6245 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:29.726078 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:29.726093 6245 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:30Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.720181 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.720254 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.720276 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.720299 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.720316 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.823577 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.823625 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.823635 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.823653 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.823664 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.926851 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.926919 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.926939 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.926971 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:30 crc kubenswrapper[4940]: I1126 06:55:30.926995 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:30Z","lastTransitionTime":"2025-11-26T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.030166 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.030203 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.030214 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.030230 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.030243 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.133432 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.133483 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.133494 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.133511 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.133525 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.164978 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:31 crc kubenswrapper[4940]: E1126 06:55:31.165172 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.168145 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:31 crc kubenswrapper[4940]: E1126 06:55:31.168234 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.236153 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.236195 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.236209 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.236227 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.236239 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.338540 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.338599 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.338617 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.338639 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.338659 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.441257 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.441316 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.441333 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.441357 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.441374 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.451401 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/0.log" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.454449 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.454623 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.472680 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.485801 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.504945 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.528251 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:29Z\\\",\\\"message\\\":\\\" reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725324 6245 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725759 6245 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:29.725820 6245 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:29.725830 6245 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:29.725849 6245 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:29.725887 6245 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:29.725892 6245 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:29.725913 6245 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:29.725919 6245 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:29.725929 6245 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:29.726005 6245 factory.go:656] Stopping watch factory\\\\nI1126 06:55:29.726026 6245 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:29.726078 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:29.726093 6245 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.544756 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.544935 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.544992 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.545074 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.545102 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.545119 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.563693 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.592857 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.609661 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.622503 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.636299 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.647066 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.647112 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.647125 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.647142 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.647153 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.650369 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.663489 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.674945 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.687027 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.697561 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.750222 4940 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.750264 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.750280 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.750304 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.750323 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.853387 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.853439 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.853455 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.853475 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.853488 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.956964 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.957064 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.957088 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.957122 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.957161 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:31Z","lastTransitionTime":"2025-11-26T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.962429 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4"] Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.962887 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.965282 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 06:55:31 crc kubenswrapper[4940]: I1126 06:55:31.966077 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.000702 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},
{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06
:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:31Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.020902 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.037942 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.058007 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.059800 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.059853 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.059872 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.059896 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.059916 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.074833 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.093859 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.113443 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.127237 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f62b1af5-c3ca-4cda-be70-b71166bd9552-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.127354 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzg4l\" (UniqueName: \"kubernetes.io/projected/f62b1af5-c3ca-4cda-be70-b71166bd9552-kube-api-access-pzg4l\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.127454 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f62b1af5-c3ca-4cda-be70-b71166bd9552-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.127523 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f62b1af5-c3ca-4cda-be70-b71166bd9552-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.132368 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.147623 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.163456 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.163528 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.163553 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.163584 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.163607 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.164395 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.165083 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:55:32 crc kubenswrapper[4940]: E1126 06:55:32.165262 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.185835 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.199706 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.220852 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.230488 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzg4l\" (UniqueName: \"kubernetes.io/projected/f62b1af5-c3ca-4cda-be70-b71166bd9552-kube-api-access-pzg4l\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.230566 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f62b1af5-c3ca-4cda-be70-b71166bd9552-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.230641 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f62b1af5-c3ca-4cda-be70-b71166bd9552-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.230686 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f62b1af5-c3ca-4cda-be70-b71166bd9552-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.231400 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f62b1af5-c3ca-4cda-be70-b71166bd9552-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.232595 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f62b1af5-c3ca-4cda-be70-b71166bd9552-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.237754 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f62b1af5-c3ca-4cda-be70-b71166bd9552-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.278526 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:29Z\\\",\\\"message\\\":\\\" reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725324 6245 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725759 6245 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:29.725820 6245 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:29.725830 6245 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:29.725849 6245 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:29.725887 6245 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:29.725892 6245 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:29.725913 6245 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:29.725919 6245 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:29.725929 6245 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:29.726005 6245 factory.go:656] Stopping watch factory\\\\nI1126 06:55:29.726026 6245 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:29.726078 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:29.726093 6245 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.282168 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.282230 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.282249 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.282273 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.282290 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.284059 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzg4l\" (UniqueName: \"kubernetes.io/projected/f62b1af5-c3ca-4cda-be70-b71166bd9552-kube-api-access-pzg4l\") pod \"ovnkube-control-plane-749d76644c-7v8p4\" (UID: \"f62b1af5-c3ca-4cda-be70-b71166bd9552\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.288221 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.314705 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\
\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: W1126 06:55:32.346053 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf62b1af5_c3ca_4cda_be70_b71166bd9552.slice/crio-b6fc5c391274d2a682769e50d9a283ae84944d09f362944fd97a09a1bf9529fe WatchSource:0}: Error finding container b6fc5c391274d2a682769e50d9a283ae84944d09f362944fd97a09a1bf9529fe: Status 404 returned error can't find the container with id b6fc5c391274d2a682769e50d9a283ae84944d09f362944fd97a09a1bf9529fe Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.357567 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.384565 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.384596 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.384605 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.384617 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.384626 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.459969 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/1.log" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.460779 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/0.log" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.464283 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026" exitCode=1 Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.464375 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026"} Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.464424 4940 scope.go:117] "RemoveContainer" containerID="f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.465098 4940 scope.go:117] "RemoveContainer" containerID="2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026" Nov 26 06:55:32 crc kubenswrapper[4940]: E1126 06:55:32.465259 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.465596 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" event={"ID":"f62b1af5-c3ca-4cda-be70-b71166bd9552","Type":"ContainerStarted","Data":"b6fc5c391274d2a682769e50d9a283ae84944d09f362944fd97a09a1bf9529fe"} Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.476552 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.487898 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.487943 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.487958 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.487978 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.487991 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.490334 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.502061 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.514062 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.535181 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.550021 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.570838 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.582058 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.592464 4940 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.592511 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.592524 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.592546 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.592562 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.597019 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.612247 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.628708 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.642720 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.659569 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.680772 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:29Z\\\",\\\"message\\\":\\\" reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725324 6245 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725759 6245 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:29.725820 6245 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:29.725830 6245 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:29.725849 6245 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:29.725887 6245 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:29.725892 6245 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:29.725913 6245 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:29.725919 6245 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:29.725929 6245 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:29.726005 6245 factory.go:656] Stopping watch factory\\\\nI1126 06:55:29.726026 6245 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:29.726078 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:29.726093 6245 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"ift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{apps/v1 Deployment router-default bb5b8eff-af7d-47c8-85cd-465e0b29b7d1 0xc0006b3d1e \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{1 0 http},NodePort:0,AppProtocol:nil,},ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{1 0 https},NodePort:0,AppProtocol:nil,},ServicePort{Name:metrics,Protocol:TCP,Port:1936,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default,},ClusterIP:10.217.4.176,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.176],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 06:55:31.629120 6386 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-x5j9z\\\\nI1126 06:55:31.629129 6386\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.695238 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.695290 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.695300 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.695313 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.695325 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.699911 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.729088 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:32Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.797174 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.797218 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.797232 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.797252 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.797268 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.900070 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.900137 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.900157 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.900183 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:32 crc kubenswrapper[4940]: I1126 06:55:32.900207 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:32Z","lastTransitionTime":"2025-11-26T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.003922 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.004096 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.004127 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.004210 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.004232 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.107548 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.107615 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.107639 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.107685 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.107734 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.164919 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.164995 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:55:33 crc kubenswrapper[4940]: E1126 06:55:33.165152 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:55:33 crc kubenswrapper[4940]: E1126 06:55:33.165340 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.211880 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.211921 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.211930 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.211948 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.211960 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.315426 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.315484 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.315494 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.315514 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.315529 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.418558 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.418629 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.418647 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.418670 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.418687 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.469266 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-nfh6j"]
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.470227 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:55:33 crc kubenswrapper[4940]: E1126 06:55:33.470288 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.472165 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" event={"ID":"f62b1af5-c3ca-4cda-be70-b71166bd9552","Type":"ContainerStarted","Data":"3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.472205 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" event={"ID":"f62b1af5-c3ca-4cda-be70-b71166bd9552","Type":"ContainerStarted","Data":"431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.474620 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/1.log"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.490113 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.503622 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.513216 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.521498 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.521542 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.521554 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.521574 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.521587 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.544153 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnd9c\" (UniqueName: \"kubernetes.io/projected/b1a21dbc-3d52-4bc2-805b-65dc954babce-kube-api-access-tnd9c\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.544204 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.544947 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.560076 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.575429 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.586334 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.599376 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.610211 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.622450 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.623942 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.623975 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.623985 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.623998 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.624007 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.634578 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.645022 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnd9c\" (UniqueName: \"kubernetes.io/projected/b1a21dbc-3d52-4bc2-805b-65dc954babce-kube-api-access-tnd9c\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.645097 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:33 crc kubenswrapper[4940]: E1126 06:55:33.645231 4940 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:33 crc kubenswrapper[4940]: E1126 06:55:33.645290 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs 
podName:b1a21dbc-3d52-4bc2-805b-65dc954babce nodeName:}" failed. No retries permitted until 2025-11-26 06:55:34.145269793 +0000 UTC m=+35.665411412 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs") pod "network-metrics-daemon-nfh6j" (UID: "b1a21dbc-3d52-4bc2-805b-65dc954babce") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.645685 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.662320 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.664361 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnd9c\" (UniqueName: \"kubernetes.io/projected/b1a21dbc-3d52-4bc2-805b-65dc954babce-kube-api-access-tnd9c\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " 
pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.683598 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:29Z\\\",\\\"message\\\":\\\" reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725324 6245 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725759 6245 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:29.725820 6245 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:29.725830 6245 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:29.725849 6245 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:29.725887 6245 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:29.725892 6245 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:29.725913 6245 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:29.725919 6245 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:29.725929 6245 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:29.726005 6245 factory.go:656] Stopping watch factory\\\\nI1126 06:55:29.726026 6245 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:29.726078 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:29.726093 6245 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"ift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{apps/v1 Deployment router-default bb5b8eff-af7d-47c8-85cd-465e0b29b7d1 0xc0006b3d1e \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{1 0 http},NodePort:0,AppProtocol:nil,},ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{1 0 https},NodePort:0,AppProtocol:nil,},ServicePort{Name:metrics,Protocol:TCP,Port:1936,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default,},ClusterIP:10.217.4.176,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.176],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 06:55:31.629120 6386 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-x5j9z\\\\nI1126 06:55:31.629129 
6386\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.696895 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt
/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.709002 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.720884 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.726894 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.726927 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.726938 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.726953 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.726963 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.733428 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.756881 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.768864 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.781003 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.797127 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.811640 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.828517 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.829741 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" 
Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.829797 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.829820 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.829850 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.829872 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.845711 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.858887 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.877097 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.889340 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.903814 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.917657 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run
/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.932773 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.932808 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.932817 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.932833 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.932843 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:33Z","lastTransitionTime":"2025-11-26T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.939477 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.953034 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.973318 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:33 crc kubenswrapper[4940]: I1126 06:55:33.994299 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:29Z\\\",\\\"message\\\":\\\" reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725324 6245 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725759 6245 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:29.725820 6245 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:29.725830 6245 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:29.725849 6245 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:29.725887 6245 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:29.725892 6245 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:29.725913 6245 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:29.725919 6245 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:29.725929 6245 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:29.726005 6245 factory.go:656] Stopping watch factory\\\\nI1126 06:55:29.726026 6245 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:29.726078 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:29.726093 6245 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"ift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{apps/v1 Deployment router-default 
bb5b8eff-af7d-47c8-85cd-465e0b29b7d1 0xc0006b3d1e \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{1 0 http},NodePort:0,AppProtocol:nil,},ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{1 0 https},NodePort:0,AppProtocol:nil,},ServicePort{Name:metrics,Protocol:TCP,Port:1936,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default,},ClusterIP:10.217.4.176,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.176],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 06:55:31.629120 6386 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-x5j9z\\\\nI1126 06:55:31.629129 6386\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:33Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.035670 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.035728 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.035745 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.035770 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.035786 4940 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.139123 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.139221 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.139240 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.139266 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.139283 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.150134 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.150346 4940 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.150488 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs podName:b1a21dbc-3d52-4bc2-805b-65dc954babce nodeName:}" failed. No retries permitted until 2025-11-26 06:55:35.150422733 +0000 UTC m=+36.670564392 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs") pod "network-metrics-daemon-nfh6j" (UID: "b1a21dbc-3d52-4bc2-805b-65dc954babce") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.165341 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.165511 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.242920 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.242999 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.243019 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.243076 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.243095 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.347145 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.347216 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.347255 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.347288 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.347309 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.450185 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.450251 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.450268 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.450294 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.450314 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.559231 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.559321 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.559348 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.559379 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.559401 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.662959 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.663007 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.663025 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.663083 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.663108 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.766205 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.766659 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.766677 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.766703 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.766726 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.870591 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.870673 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.870691 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.870718 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.870736 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.959862 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.960021 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.960119 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960218 4940 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960232 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:55:50.960183611 +0000 UTC m=+52.480325270 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960302 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:50.960278334 +0000 UTC m=+52.480419993 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960301 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960354 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960371 4940 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.960387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960453 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:50.960416778 +0000 UTC m=+52.480558407 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.960491 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960635 4940 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960661 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960680 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960690 4940 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960718 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:50.960701597 +0000 UTC m=+52.480843256 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:55:34 crc kubenswrapper[4940]: E1126 06:55:34.960745 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:55:50.960733038 +0000 UTC m=+52.480874687 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.975102 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.975207 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.975267 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.975297 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:34 crc kubenswrapper[4940]: I1126 06:55:34.975317 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:34Z","lastTransitionTime":"2025-11-26T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.029458 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.029522 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.029536 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.029561 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.029577 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.048026 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.052878 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.052919 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.052932 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.052954 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.052969 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.073425 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.077199 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.077229 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
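[Editor's sketch] The patch failures above all report the same root cause: the serving certificate of the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, well before the node's clock time. A minimal, non-authoritative way to confirm the certificate's validity window from the node, assuming Python 3 and the third-party cryptography package are available (only the host, port, and expiry date come from the log; everything else is illustrative):

# Minimal diagnostic sketch (assumed environment: Python 3 on the node with
# the third-party 'cryptography' package). Fetches the certificate the
# webhook endpoint presents and prints its validity window.
import ssl
from datetime import datetime, timezone
from cryptography import x509

HOST, PORT = "127.0.0.1", 9743  # endpoint from the failed Post in the log

# get_server_certificate retrieves the peer certificate without verifying it,
# which is deliberate here: verification is exactly the step failing above.
pem = ssl.get_server_certificate((HOST, PORT))
cert = x509.load_pem_x509_certificate(pem.encode())

print("subject:  ", cert.subject.rfc4514_string())
print("notBefore:", cert.not_valid_before)
print("notAfter: ", cert.not_valid_after)   # log implies 2025-08-24T17:21:41Z
print("now (UTC):", datetime.now(timezone.utc))

If notAfter is indeed in the past, rotating the webhook's serving certificate (or correcting the node clock, if that is what is skewed) is the usual remedy; the log alone cannot distinguish the two.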
event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.077241 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.077258 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.077270 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.092886 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.097208 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.097278 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.097298 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.097324 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.097343 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.115990 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.120743 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.120803 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.120821 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.120849 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.120868 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.139810 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.140035 4940 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.141935 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.141990 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.142009 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.142034 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.142083 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.162759 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.163078 4940 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.163187 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs podName:b1a21dbc-3d52-4bc2-805b-65dc954babce nodeName:}" failed. No retries permitted until 2025-11-26 06:55:37.16315855 +0000 UTC m=+38.683300209 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs") pod "network-metrics-daemon-nfh6j" (UID: "b1a21dbc-3d52-4bc2-805b-65dc954babce") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.165325 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.165365 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.165488 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.165514 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.165809 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:35 crc kubenswrapper[4940]: E1126 06:55:35.165767 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.167714 4940 scope.go:117] "RemoveContainer" containerID="f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.244918 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.244968 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.244984 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.245007 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.245025 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.348050 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.348085 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.348094 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.348107 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.348117 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.450969 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.451002 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.451010 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.451027 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.451053 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.488281 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.492175 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30"} Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.492739 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.512988 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.525832 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.546563 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.553227 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.553292 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:35 crc 
kubenswrapper[4940]: I1126 06:55:35.553309 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.553334 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.553350 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.577286 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e49
5e2237c42cb87962842ac026\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:29Z\\\",\\\"message\\\":\\\" reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725324 6245 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725759 6245 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:29.725820 6245 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:29.725830 6245 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:29.725849 6245 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:29.725887 6245 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:29.725892 6245 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:29.725913 6245 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:29.725919 6245 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:29.725929 6245 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:29.726005 6245 factory.go:656] Stopping watch factory\\\\nI1126 06:55:29.726026 6245 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:29.726078 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:29.726093 6245 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"ift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{apps/v1 Deployment router-default bb5b8eff-af7d-47c8-85cd-465e0b29b7d1 0xc0006b3d1e \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{1 0 http},NodePort:0,AppProtocol:nil,},ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{1 0 https},NodePort:0,AppProtocol:nil,},ServicePort{Name:metrics,Protocol:TCP,Port:1936,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{ingresscontroller.operator.openshift.io/deployment-ingresscontroller: 
default,},ClusterIP:10.217.4.176,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.176],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 06:55:31.629120 6386 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-x5j9z\\\\nI1126 06:55:31.629129 6386\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\
"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.596305 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.615474 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.646995 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.656555 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.656627 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.656653 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.656682 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.656704 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.663177 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.676998 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.693427 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.709308 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.724471 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.739481 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.759951 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.760000 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.760018 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.760060 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.760079 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.763451 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.781530 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.795946 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.811022 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:35Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.863273 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.863332 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.863347 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.863400 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.863418 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.966185 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.966233 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.966244 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.966260 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:35 crc kubenswrapper[4940]: I1126 06:55:35.966271 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:35Z","lastTransitionTime":"2025-11-26T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.069202 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.069240 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.069249 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.069266 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.069277 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.164873 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:55:36 crc kubenswrapper[4940]: E1126 06:55:36.165003 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.172645 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.172764 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.172783 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.172799 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.172810 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.275975 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.276082 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.276102 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.276131 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.276151 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.379442 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.379502 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.379517 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.379541 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.379556 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.484078 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.484183 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.484237 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.484546 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.484607 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.587142 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.587201 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.587214 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.587233 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.587246 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.690151 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.690196 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.690212 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.690235 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.690252 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.793343 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.793395 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.793414 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.793440 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.793457 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.896526 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.896585 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.896602 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.896628 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:36 crc kubenswrapper[4940]: I1126 06:55:36.896644 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:36Z","lastTransitionTime":"2025-11-26T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.000240 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.000314 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.000336 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.000365 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.000384 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.103719 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.103766 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.103782 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.103806 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.103824 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.165156 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.165210 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.165178 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:55:37 crc kubenswrapper[4940]: E1126 06:55:37.165380 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:55:37 crc kubenswrapper[4940]: E1126 06:55:37.165495 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:55:37 crc kubenswrapper[4940]: E1126 06:55:37.165634 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.183233 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:55:37 crc kubenswrapper[4940]: E1126 06:55:37.183405 4940 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:55:37 crc kubenswrapper[4940]: E1126 06:55:37.183501 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs podName:b1a21dbc-3d52-4bc2-805b-65dc954babce nodeName:}" failed. No retries permitted until 2025-11-26 06:55:41.183475259 +0000 UTC m=+42.703616908 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs") pod "network-metrics-daemon-nfh6j" (UID: "b1a21dbc-3d52-4bc2-805b-65dc954babce") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.207250 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.207302 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.207321 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.207348 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.207367 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.310987 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.311057 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.311107 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.311138 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.311158 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.414577 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.414626 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.414644 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.414666 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.414683 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.517673 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.517752 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.517770 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.517795 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.517815 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.620239 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.620295 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.620312 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.620335 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.620351 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.725719 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.725780 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.725804 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.725833 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.725858 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.829114 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.829192 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.829214 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.829237 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.829255 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.931701 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.931756 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.931773 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.931798 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:37 crc kubenswrapper[4940]: I1126 06:55:37.931816 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:37Z","lastTransitionTime":"2025-11-26T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.035465 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.035515 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.035537 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.035560 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.035577 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.138735 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.138797 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.138814 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.138838 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.138858 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.168326 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:38 crc kubenswrapper[4940]: E1126 06:55:38.168557 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.241366 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.241705 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.241915 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.242163 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.242334 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.345829 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.345904 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.345928 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.345955 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.345979 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.449027 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.449126 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.449150 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.449180 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.449202 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.552606 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.552684 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.552708 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.552738 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.552760 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.656149 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.656236 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.656254 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.656277 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.656295 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.760779 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.760844 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.760866 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.760896 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.760918 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.863424 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.863466 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.863476 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.863512 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.863522 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.965675 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.965735 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.965753 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.965776 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:38 crc kubenswrapper[4940]: I1126 06:55:38.965792 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:38Z","lastTransitionTime":"2025-11-26T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.073987 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.074198 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.074228 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.074307 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.074339 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.165455 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.165518 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:39 crc kubenswrapper[4940]: E1126 06:55:39.165647 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.165839 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:39 crc kubenswrapper[4940]: E1126 06:55:39.165976 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:39 crc kubenswrapper[4940]: E1126 06:55:39.166211 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.178210 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.178265 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.178288 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.178321 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.178341 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.196152 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":
\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d5
6843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.229972 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e49
5e2237c42cb87962842ac026\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f346ddcf6c2487456304869437101275cc2a5d240cb010b64556f30473b546f5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:29Z\\\",\\\"message\\\":\\\" reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725324 6245 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:29.725759 6245 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:29.725820 6245 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:29.725830 6245 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:29.725849 6245 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:29.725887 6245 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:29.725892 6245 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:29.725913 6245 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:29.725919 6245 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:29.725929 6245 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:29.726005 6245 factory.go:656] Stopping watch factory\\\\nI1126 06:55:29.726026 6245 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:29.726078 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:29.726093 6245 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"ift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{apps/v1 Deployment router-default bb5b8eff-af7d-47c8-85cd-465e0b29b7d1 0xc0006b3d1e \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{1 0 http},NodePort:0,AppProtocol:nil,},ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{1 0 https},NodePort:0,AppProtocol:nil,},ServicePort{Name:metrics,Protocol:TCP,Port:1936,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{ingresscontroller.operator.openshift.io/deployment-ingresscontroller: 
default,},ClusterIP:10.217.4.176,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.176],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 06:55:31.629120 6386 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-x5j9z\\\\nI1126 06:55:31.629129 6386\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\
"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.255551 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.280696 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.281436 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.281584 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.281703 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.281836 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.281954 4940 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.298371 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.319031 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.340489 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.358670 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.385638 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.385697 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.385717 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.385744 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.385762 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.391277 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49
117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.420678 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.462720 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.479273 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.488510 4940 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.488547 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.488559 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.488576 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.488589 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.493486 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.507556 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.527460 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.539165 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.551085 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.591156 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.591208 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.591220 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.591236 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.591247 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.693840 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.693944 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.693964 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.694015 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.694034 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.798355 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.798458 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.798517 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.798541 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.798606 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.903150 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.903230 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.903267 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.903300 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:39 crc kubenswrapper[4940]: I1126 06:55:39.903325 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:39Z","lastTransitionTime":"2025-11-26T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.006587 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.006639 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.006653 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.006672 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.006685 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.109682 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.109764 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.109783 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.109808 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.109828 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.164990 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:40 crc kubenswrapper[4940]: E1126 06:55:40.165203 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.214355 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.214414 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.214429 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.214452 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.214468 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.316847 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.317107 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.317183 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.317250 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.317309 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.420210 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.420592 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.420658 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.420734 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.420806 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.524246 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.524318 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.524340 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.524366 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.524383 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.627414 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.627474 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.627498 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.627527 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.627550 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.730562 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.730622 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.730638 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.730685 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.730702 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.833961 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.834021 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.834086 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.834117 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.834140 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.937006 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.937111 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.937145 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.937180 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:40 crc kubenswrapper[4940]: I1126 06:55:40.937202 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:40Z","lastTransitionTime":"2025-11-26T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.040392 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.040459 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.040477 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.040504 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.040522 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.143301 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.143350 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.143358 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.143373 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.143383 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.164763 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.164884 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.165201 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:41 crc kubenswrapper[4940]: E1126 06:55:41.165180 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:41 crc kubenswrapper[4940]: E1126 06:55:41.165267 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:41 crc kubenswrapper[4940]: E1126 06:55:41.165335 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.239637 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:41 crc kubenswrapper[4940]: E1126 06:55:41.239848 4940 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:41 crc kubenswrapper[4940]: E1126 06:55:41.239974 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs podName:b1a21dbc-3d52-4bc2-805b-65dc954babce nodeName:}" failed. No retries permitted until 2025-11-26 06:55:49.239946031 +0000 UTC m=+50.760087680 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs") pod "network-metrics-daemon-nfh6j" (UID: "b1a21dbc-3d52-4bc2-805b-65dc954babce") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.246889 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.246943 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.247369 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.247414 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.247438 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.349581 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.349631 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.349668 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.349696 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.349792 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.452980 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.453036 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.453084 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.453110 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.453131 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.556576 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.556640 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.556664 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.556695 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.556719 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.660270 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.660338 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.660363 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.660392 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.660412 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.763339 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.764739 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.764960 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.765229 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.765424 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.869258 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.869327 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.869346 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.869376 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.869400 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.972270 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.972771 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.972893 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.972985 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:41 crc kubenswrapper[4940]: I1126 06:55:41.973075 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:41Z","lastTransitionTime":"2025-11-26T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.076155 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.076208 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.076218 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.076238 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.076251 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.164860 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:42 crc kubenswrapper[4940]: E1126 06:55:42.165482 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.179138 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.179238 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.179256 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.179281 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.179302 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.280208 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.281583 4940 scope.go:117] "RemoveContainer" containerID="2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.282371 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.282428 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.282451 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.282479 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.282503 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.306895 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.325636 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.350264 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.385412 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.385471 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc 
kubenswrapper[4940]: I1126 06:55:42.385491 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.385515 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.385533 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.390034 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e49
5e2237c42cb87962842ac026\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"ift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{apps/v1 Deployment router-default bb5b8eff-af7d-47c8-85cd-465e0b29b7d1 0xc0006b3d1e \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{1 0 http},NodePort:0,AppProtocol:nil,},ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{1 0 https},NodePort:0,AppProtocol:nil,},ServicePort{Name:metrics,Protocol:TCP,Port:1936,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default,},ClusterIP:10.217.4.176,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.176],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 06:55:31.629120 6386 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-x5j9z\\\\nI1126 06:55:31.629129 6386\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.411747 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.431879 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.451142 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.473131 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.488318 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.488374 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.488385 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.488401 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.488411 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.493758 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.507550 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.518239 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/1.log" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.521994 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.523248 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.538048 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\
\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"start
edAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.555138 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.574354 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.590713 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.591882 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.591934 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.591956 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.591985 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.592009 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.605013 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.622982 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc 
kubenswrapper[4940]: I1126 06:55:42.639598 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.663991 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name
\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897
dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.685793 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.694271 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.694344 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.694365 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.694389 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.694409 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.705637 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.722175 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.737027 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.760416 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.780575 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.796919 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.797022 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.797075 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.797111 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.797136 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.800602 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.823710 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.836675 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.845824 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 
06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.857773 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.871524 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"
quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.880137 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.893204 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.899427 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.899464 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:42 crc 
kubenswrapper[4940]: I1126 06:55:42.899473 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.899485 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.899494 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:42Z","lastTransitionTime":"2025-11-26T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.912085 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b5
1bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"ift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{apps/v1 Deployment router-default bb5b8eff-af7d-47c8-85cd-465e0b29b7d1 0xc0006b3d1e \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{1 0 http},NodePort:0,AppProtocol:nil,},ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{1 0 https},NodePort:0,AppProtocol:nil,},ServicePort{Name:metrics,Protocol:TCP,Port:1936,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default,},ClusterIP:10.217.4.176,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.176],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 06:55:31.629120 6386 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-x5j9z\\\\nI1126 06:55:31.629129 
6386\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"c
ontainerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:42 crc kubenswrapper[4940]: I1126 06:55:42.927035 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" 
for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:42Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.001405 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.001465 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.001482 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.001506 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.001524 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.104340 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.104391 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.104404 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.104422 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.104452 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.165529 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.165628 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:43 crc kubenswrapper[4940]: E1126 06:55:43.165696 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.165715 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:43 crc kubenswrapper[4940]: E1126 06:55:43.165839 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:43 crc kubenswrapper[4940]: E1126 06:55:43.166070 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.207523 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.207592 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.207609 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.207631 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.207649 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.310879 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.311245 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.311258 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.311276 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.311290 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.414274 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.414365 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.414383 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.414411 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.414437 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.516690 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.516756 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.516775 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.516800 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.516817 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.528216 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/2.log" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.529346 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/1.log" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.533983 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d" exitCode=1 Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.534069 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.534223 4940 scope.go:117] "RemoveContainer" containerID="2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.535738 4940 scope.go:117] "RemoveContainer" containerID="714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d" Nov 26 06:55:43 crc kubenswrapper[4940]: E1126 06:55:43.536129 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.550187 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 
06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.564676 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.587694 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.601173 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.612766 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.620159 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.620215 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.620232 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.620255 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.620270 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.625718 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.646951 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f21992e45982f1e3e6e2263eab14c2afa593e495e2237c42cb87962842ac026\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"message\\\":\\\"ift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{apps/v1 Deployment router-default bb5b8eff-af7d-47c8-85cd-465e0b29b7d1 0xc0006b3d1e \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{1 0 http},NodePort:0,AppProtocol:nil,},ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{1 0 https},NodePort:0,AppProtocol:nil,},ServicePort{Name:metrics,Protocol:TCP,Port:1936,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default,},ClusterIP:10.217.4.176,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.176],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 06:55:31.629120 6386 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-x5j9z\\\\nI1126 06:55:31.629129 
6386\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.661376 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.673988 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.686360 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.706776 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.723411 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.724093 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.724140 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.724158 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.724182 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.724199 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.735143 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.759228 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.776211 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.795802 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.816080 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.827139 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.827180 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.827189 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.827204 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.827214 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.930387 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.930436 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.930453 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.930482 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:43 crc kubenswrapper[4940]: I1126 06:55:43.930504 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:43Z","lastTransitionTime":"2025-11-26T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.034186 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.034256 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.034276 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.034300 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.034317 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.137079 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.137144 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.137194 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.137223 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.137240 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.165612 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:44 crc kubenswrapper[4940]: E1126 06:55:44.165848 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.240048 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.240094 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.240103 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.240119 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.240134 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.343203 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.343262 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.343278 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.343315 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.343328 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.447344 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.447412 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.447439 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.447480 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.447504 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.539599 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/2.log" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.543502 4940 scope.go:117] "RemoveContainer" containerID="714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d" Nov 26 06:55:44 crc kubenswrapper[4940]: E1126 06:55:44.543694 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.549787 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.549833 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.549891 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.549912 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.549927 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.566110 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.585179 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.602881 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.618977 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.634441 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.653376 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.653437 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.653455 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.653549 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.653576 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.669733 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.689972 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.710721 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.729487 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.753866 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.758144 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.758251 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.758265 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.758331 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.758345 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.769520 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.784996 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.800678 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.813786 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.829027 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.850792 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.861724 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.861762 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.861773 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.861791 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.861803 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.873826 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.965223 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.965284 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.965301 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.965325 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:44 crc kubenswrapper[4940]: I1126 06:55:44.965343 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:44Z","lastTransitionTime":"2025-11-26T06:55:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.069377 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.069454 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.069482 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.069515 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.069538 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.164812 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.164813 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.165098 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.165366 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.165529 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.165725 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.171954 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.171988 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.171997 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.172012 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.172023 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.275613 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.275667 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.275679 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.275697 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.275710 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.291539 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.291597 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.291614 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.291641 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.291660 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.312929 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.317821 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.317863 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.317875 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.317893 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.317907 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.336870 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.342742 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.342798 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.342816 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.342840 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.342858 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.362837 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.367974 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.368073 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.368094 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.368118 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.368138 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.390922 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.396164 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.396252 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.396271 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.396297 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.396317 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.415376 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: E1126 06:55:45.415597 4940 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.418119 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.418163 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.418181 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.418205 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.418223 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.521575 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.521621 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.521640 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.521665 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.521684 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.624093 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.624174 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.624192 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.624214 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.624230 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.728589 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.728657 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.728676 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.728703 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.728723 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.809831 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.825708 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.839970 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.840029 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.840059 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.840094 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.840108 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.847043 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.863998 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.879792 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.894846 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.912413 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.931450 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.943781 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.943861 
4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.943875 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.943900 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.943915 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:45Z","lastTransitionTime":"2025-11-26T06:55:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.945262 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.962174 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc30
1666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:45 crc kubenswrapper[4940]: I1126 06:55:45.984122 4940 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:17
4f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ov
n-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:45.999904 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:45Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.013255 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.025866 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.041692 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.047399 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.047467 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.047482 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.047505 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.047521 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.059089 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.071753 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.094415 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"start
edAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50
c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.150997 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.151096 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.151109 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.151188 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.151209 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.165370 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:46 crc kubenswrapper[4940]: E1126 06:55:46.165516 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.255137 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.255198 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.255209 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.255230 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.255247 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.357944 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.357994 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.358005 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.358026 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.358059 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.461518 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.461563 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.461573 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.461591 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.461605 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.564272 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.564326 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.564337 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.564359 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.564370 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.666646 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.666693 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.666705 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.666734 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.666747 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.775663 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.775703 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.775713 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.775725 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.775733 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.877747 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.877780 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.877788 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.877803 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.877812 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.981638 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.981705 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.981730 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.981759 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:46 crc kubenswrapper[4940]: I1126 06:55:46.981776 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:46Z","lastTransitionTime":"2025-11-26T06:55:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.084141 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.084204 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.084221 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.084248 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.084266 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.164809 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.164886 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:47 crc kubenswrapper[4940]: E1126 06:55:47.164972 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.164809 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:47 crc kubenswrapper[4940]: E1126 06:55:47.165204 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:47 crc kubenswrapper[4940]: E1126 06:55:47.165355 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.186815 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.186873 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.186893 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.186915 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.186934 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.290355 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.290397 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.290408 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.290428 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.290443 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.393597 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.393685 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.393713 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.393746 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.393769 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.497410 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.497570 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.497590 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.497615 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.497633 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.609665 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.609725 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.609750 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.609780 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.609802 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.712647 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.712728 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.712746 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.712772 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.712791 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.818374 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.818434 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.818453 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.818481 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.818499 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.921271 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.921332 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.921352 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.921379 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:47 crc kubenswrapper[4940]: I1126 06:55:47.921403 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:47Z","lastTransitionTime":"2025-11-26T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.024217 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.024273 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.024292 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.024315 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.024331 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.126890 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.126939 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.126956 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.126980 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.126997 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.164964 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:48 crc kubenswrapper[4940]: E1126 06:55:48.165185 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.229761 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.229819 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.229838 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.229861 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.229881 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.333310 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.333388 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.333408 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.333432 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.333451 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.435771 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.435830 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.435847 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.435870 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.435888 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.538862 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.538916 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.538935 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.538961 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.538979 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.642557 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.642633 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.642650 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.642680 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.642697 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.745654 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.745707 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.745728 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.745755 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.745776 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.848615 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.848687 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.848710 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.848739 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.848762 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.951967 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.952076 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.952095 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.952118 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:48 crc kubenswrapper[4940]: I1126 06:55:48.952139 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:48Z","lastTransitionTime":"2025-11-26T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.055396 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.055484 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.055511 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.055547 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.055571 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.159016 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.159125 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.159151 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.159175 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.159193 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.165537 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.165665 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.165677 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:49 crc kubenswrapper[4940]: E1126 06:55:49.165813 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:49 crc kubenswrapper[4940]: E1126 06:55:49.166036 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:49 crc kubenswrapper[4940]: E1126 06:55:49.166231 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.201135 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\"
:0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"contai
nerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.226525 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.246736 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.261315 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.261375 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.261399 4940 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.261428 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.261450 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.277676 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.291764 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.309439 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.323200 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.336348 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:49 crc kubenswrapper[4940]: E1126 06:55:49.336513 4940 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:49 crc kubenswrapper[4940]: E1126 06:55:49.336596 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs podName:b1a21dbc-3d52-4bc2-805b-65dc954babce nodeName:}" failed. No retries permitted until 2025-11-26 06:56:05.336563697 +0000 UTC m=+66.856705326 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs") pod "network-metrics-daemon-nfh6j" (UID: "b1a21dbc-3d52-4bc2-805b-65dc954babce") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.337988 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.350297 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.362165 4940 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.364651 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.364708 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.364727 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.364753 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.364773 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.375285 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.391548 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.403982 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.422005 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.455874 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.467692 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.467831 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.467930 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.468022 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.468141 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.470436 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.482613 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:49Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.570624 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.570980 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.571199 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.571349 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.571469 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.675469 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.675536 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.675558 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.675593 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.675615 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.778792 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.778830 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.778839 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.778852 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.778861 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.881532 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.881844 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.881915 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.882023 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.882124 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.986215 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.987121 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.987298 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.987521 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:49 crc kubenswrapper[4940]: I1126 06:55:49.987712 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:49Z","lastTransitionTime":"2025-11-26T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.090966 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.091008 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.091018 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.091032 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.091066 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.164945 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:55:50 crc kubenswrapper[4940]: E1126 06:55:50.165116 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.194391 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.194458 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.194475 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.194497 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.194515 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.298118 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.298174 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.298193 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.298220 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.298240 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.401464 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.401775 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.401913 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.402097 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.402294 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.505218 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.505302 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.505326 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.505361 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.505384 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.608995 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.609116 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.609137 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.609162 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.609183 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.712187 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.712258 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.712281 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.712309 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.712332 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.818600 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.818656 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.818681 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.818730 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.818756 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.922661 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.922707 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.922721 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.922740 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:50 crc kubenswrapper[4940]: I1126 06:55:50.922752 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:50Z","lastTransitionTime":"2025-11-26T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.025756 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.025823 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.025841 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.025868 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.025886 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.055554 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.055715 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.055880 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.055906 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.055873 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:23.055833721 +0000 UTC m=+84.575975380 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.055928 4940 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.056002 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:56:23.055981916 +0000 UTC m=+84.576123575 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.056411 4940 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.056556 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:56:23.056520013 +0000 UTC m=+84.576661802 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.056113 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.056693 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.056768 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.056871 4940 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.056963 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:56:23.056942237 +0000 UTC m=+84.577083886 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.056999 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.057031 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.057091 4940 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.057177 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:56:23.057153744 +0000 UTC m=+84.577295533 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.128325 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.128369 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.128384 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.128404 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.128419 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.165400 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.165470 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.165531 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.165553 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.165730 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:55:51 crc kubenswrapper[4940]: E1126 06:55:51.165989 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.230554 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.230833 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.230893 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.230962 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.231024 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.333540 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.333606 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.333625 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.333651 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.333672 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.436770 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.436842 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.436862 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.436888 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.436907 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.540183 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.540245 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.540301 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.540328 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.540393 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.660368 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.660480 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.660510 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.660542 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.660570 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.764349 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.764412 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.764429 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.764453 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.764469 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.867802 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.867868 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.867886 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.867911 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.867928 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.971014 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.971132 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.971155 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.971185 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:51 crc kubenswrapper[4940]: I1126 06:55:51.971202 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:51Z","lastTransitionTime":"2025-11-26T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.074149 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.074234 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.074268 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.074338 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.074360 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.165162 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:55:52 crc kubenswrapper[4940]: E1126 06:55:52.165377 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.176589 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.176637 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.176650 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.176666 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.176679 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.279385 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.279455 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.279476 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.279504 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.279524 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.382130 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.382176 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.382188 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.382205 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.382218 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.485596 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.485658 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.485682 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.485712 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.485734 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.587892 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.587949 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.587970 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.587997 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.588018 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.691476 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.691604 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.691625 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.691651 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.691670 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.795454 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.795522 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.795540 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.795565 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.795582 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.899801 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.899881 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.899905 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.899935 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:52 crc kubenswrapper[4940]: I1126 06:55:52.899955 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:52Z","lastTransitionTime":"2025-11-26T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.002519 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.002886 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.003094 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.003267 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.003444 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.106419 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.107303 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.107340 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.107376 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.107400 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.165457 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.165465 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.165555 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:55:53 crc kubenswrapper[4940]: E1126 06:55:53.165697 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:55:53 crc kubenswrapper[4940]: E1126 06:55:53.165766 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:55:53 crc kubenswrapper[4940]: E1126 06:55:53.165862 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.210648 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.210696 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.210712 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.210727 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.210738 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.314173 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.314244 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.314261 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.314288 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.314310 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.417811 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.417850 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.417861 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.417881 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.417893 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.521440 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.521496 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.521513 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.521540 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.521557 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.625967 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.626028 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.626071 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.626095 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.626112 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.729329 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.729464 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.729548 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.729575 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.729592 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.832082 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.832145 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.832168 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.832199 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.832220 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.836946 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.848148 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.853454 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.888407 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.910595 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.929335 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.934895 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.935098 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.935227 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.935347 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.935462 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:53Z","lastTransitionTime":"2025-11-26T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.950787 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.969813 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.985729 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:53 crc kubenswrapper[4940]: I1126 06:55:53.999394 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.012218 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.027570 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.038525 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.038605 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.038629 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.038662 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.038686 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.041971 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.068606 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.083846 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.113991 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.126520 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.141568 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.141657 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.141678 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.141704 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.141722 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.144258 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e889
0e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.159525 4940 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.164646 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:55:54 crc kubenswrapper[4940]: E1126 06:55:54.164790 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.244863 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.244910 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.244927 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.244949 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.244970 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.351762 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.351825 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.351869 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.351978 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.352024 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.455712 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.455796 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.455817 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.455842 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.455859 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.559171 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.559225 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.559242 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.559265 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.559283 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.662500 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.662560 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.662579 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.662604 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.662623 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.765985 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.766084 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.766107 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.766134 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.766153 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.868542 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.868573 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.868581 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.868595 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.868604 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.971090 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.971150 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.971168 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.971193 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:54 crc kubenswrapper[4940]: I1126 06:55:54.971214 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:54Z","lastTransitionTime":"2025-11-26T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.073728 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.073793 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.073813 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.073838 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.073858 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.165357 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.165430 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.165616 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.165716 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.165876 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.166147 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.176282 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.176343 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.176366 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.176394 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.176417 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.278838 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.278888 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.278904 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.278926 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.278943 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.381888 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.381940 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.381957 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.381982 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.381999 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.477405 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.477462 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.477473 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.477491 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.477503 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.498454 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:55Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.504165 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.504225 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.504236 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.504256 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.504270 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.522311 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:55Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.527474 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.527550 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.527566 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.527586 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.527597 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.540570 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:55Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.545269 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.545339 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.545356 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.545380 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.545397 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.561310 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:55Z is after 2025-08-24T17:21:41Z"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.565932 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.565993 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.566008 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.566030 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.566062 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.588030 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:55 crc kubenswrapper[4940]: E1126 06:55:55.588385 4940 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.590295 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.590344 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.590359 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.590376 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.590386 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.693541 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.693609 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.693631 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.693691 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.693747 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.797706 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.797784 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.797808 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.797839 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.797870 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.901783 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.901839 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.901856 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.901879 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:55 crc kubenswrapper[4940]: I1126 06:55:55.901897 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:55Z","lastTransitionTime":"2025-11-26T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.005127 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.005199 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.005217 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.005242 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.005260 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.107991 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.108090 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.108110 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.108135 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.108155 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.165326 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:56 crc kubenswrapper[4940]: E1126 06:55:56.165545 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.211441 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.211505 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.211522 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.211545 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.211563 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.315004 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.315135 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.315161 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.315195 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.315217 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.418985 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.419092 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.419120 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.419152 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.419176 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.522557 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.522621 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.522639 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.522661 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.522681 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.626934 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.629352 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.629415 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.629436 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.629476 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.731971 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.732009 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.732020 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.732039 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.732066 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.835191 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.835253 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.835271 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.835299 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.835317 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.937992 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.938050 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.938060 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.938076 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:56 crc kubenswrapper[4940]: I1126 06:55:56.938088 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:56Z","lastTransitionTime":"2025-11-26T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.040341 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.040392 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.040403 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.040421 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.040434 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.143001 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.143070 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.143083 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.143104 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.143119 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.165636 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.165725 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.166189 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:57 crc kubenswrapper[4940]: E1126 06:55:57.166333 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:57 crc kubenswrapper[4940]: E1126 06:55:57.166647 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:57 crc kubenswrapper[4940]: E1126 06:55:57.166746 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.166755 4940 scope.go:117] "RemoveContainer" containerID="714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d" Nov 26 06:55:57 crc kubenswrapper[4940]: E1126 06:55:57.167146 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.246236 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.246569 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.246584 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.246605 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.246621 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.349592 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.349682 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.349711 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.349742 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.349766 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.453499 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.453576 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.453594 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.453620 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.453639 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.557187 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.557264 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.557292 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.557323 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.557349 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.659956 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.660016 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.660025 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.660067 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.660079 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.763060 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.763094 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.763103 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.763117 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.763127 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.865451 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.865484 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.865492 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.865505 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.865514 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.968554 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.968622 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.968637 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.968664 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:57 crc kubenswrapper[4940]: I1126 06:55:57.968680 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:57Z","lastTransitionTime":"2025-11-26T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.072027 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.072087 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.072099 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.072117 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.072129 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.175179 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.175218 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.175226 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.175246 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.175257 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.278171 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.278215 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.278227 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.278246 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.278257 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.381239 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.381278 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.381290 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.381307 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.381317 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.386214 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:55:58 crc kubenswrapper[4940]: E1126 06:55:58.386751 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.387280 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:55:58 crc kubenswrapper[4940]: E1126 06:55:58.387523 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.488779 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.488828 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.488837 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.488856 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.488868 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.590919 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.590962 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.590971 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.590989 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.591027 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.694304 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.694347 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.694356 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.694377 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.694391 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.797966 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.798015 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.798031 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.798068 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.798081 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.901331 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.901399 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.901423 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.901453 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:58 crc kubenswrapper[4940]: I1126 06:55:58.901478 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:58Z","lastTransitionTime":"2025-11-26T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.004467 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.004635 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.004655 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.004676 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.004694 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.109860 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.109939 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.109962 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.109991 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.110013 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.165169 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:55:59 crc kubenswrapper[4940]: E1126 06:55:59.165300 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.165184 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:55:59 crc kubenswrapper[4940]: E1126 06:55:59.165485 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.203674 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b5
1bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.213314 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.213357 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.213369 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.213389 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.213401 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.228069 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.256196 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sh
a256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.294244 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.319946 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.324428 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.324484 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc 
kubenswrapper[4940]: I1126 06:55:59.324500 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.324526 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.324543 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.334285 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"16669bb5-486e-415d-a74e-79811d37124e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.346715 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.357021 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.385025 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.399134 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.412497 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.426263 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.427467 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.427514 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.427530 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.427549 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.427561 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.438414 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.450784 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc 
kubenswrapper[4940]: I1126 06:55:59.463671 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.482016 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.498588 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.512230 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:55:59Z is after 2025-08-24T17:21:41Z" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.530279 4940 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.530337 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.530355 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.530379 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.530396 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.633470 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.633541 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.633559 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.633587 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.633608 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.736916 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.737139 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.737156 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.737180 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.737197 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.841090 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.841154 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.841175 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.841216 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.841245 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.943959 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.944101 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.944134 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.944165 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:55:59 crc kubenswrapper[4940]: I1126 06:55:59.944187 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:55:59Z","lastTransitionTime":"2025-11-26T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.047601 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.047654 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.047670 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.047690 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.047703 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.150960 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.151068 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.151095 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.151125 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.151146 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.164948 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.164969 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:00 crc kubenswrapper[4940]: E1126 06:56:00.165159 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:00 crc kubenswrapper[4940]: E1126 06:56:00.165334 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.261606 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.261677 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.261698 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.261723 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.261742 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.365699 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.365788 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.365806 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.365850 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.365866 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.469407 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.469506 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.469524 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.469549 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.469568 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.572782 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.572889 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.572952 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.573016 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.573085 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.676976 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.677091 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.677112 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.677141 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.677160 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.780282 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.780368 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.780391 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.780426 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.780451 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.883905 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.883972 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.883996 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.884025 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.884079 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.987690 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.987748 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.987764 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.987790 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:00 crc kubenswrapper[4940]: I1126 06:56:00.987809 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:00Z","lastTransitionTime":"2025-11-26T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.090978 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.091119 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.091145 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.091171 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.091190 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.164875 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.164948 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:01 crc kubenswrapper[4940]: E1126 06:56:01.165159 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:01 crc kubenswrapper[4940]: E1126 06:56:01.165379 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.193389 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.193446 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.193463 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.193485 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.193501 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.296734 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.296808 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.296826 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.296855 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.296874 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.399099 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.399157 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.399173 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.399195 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.399210 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.501862 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.501935 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.501954 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.501979 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.502031 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.605003 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.605085 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.605097 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.605118 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.605129 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.707466 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.707507 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.707515 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.707531 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.707540 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.810260 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.810329 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.810346 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.810373 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.810391 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.912614 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.912699 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.912724 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.912792 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:01 crc kubenswrapper[4940]: I1126 06:56:01.912817 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:01Z","lastTransitionTime":"2025-11-26T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.016133 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.016222 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.016258 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.016287 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.016308 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.118991 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.119086 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.119111 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.119141 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.119162 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.164972 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.165022 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:02 crc kubenswrapper[4940]: E1126 06:56:02.165173 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:02 crc kubenswrapper[4940]: E1126 06:56:02.165337 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.222664 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.222727 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.222744 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.222766 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.222784 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.325272 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.325335 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.325351 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.325375 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.325392 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.428236 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.428268 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.428277 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.428290 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.428299 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.531014 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.531099 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.531118 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.531140 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.531155 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.633486 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.633547 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.633569 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.633597 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.633617 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.738674 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.738740 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.738761 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.739373 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.739432 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.842669 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.843006 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.843077 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.843112 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.843134 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.946805 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.946853 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.946866 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.946882 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:02 crc kubenswrapper[4940]: I1126 06:56:02.946926 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:02Z","lastTransitionTime":"2025-11-26T06:56:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.049675 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.049708 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.049718 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.049732 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.049743 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.152504 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.152541 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.152551 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.152583 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.152595 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.164979 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:03 crc kubenswrapper[4940]: E1126 06:56:03.165201 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.164994 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:03 crc kubenswrapper[4940]: E1126 06:56:03.165541 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.254924 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.254957 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.254965 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.254992 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.255003 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.358315 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.358357 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.358369 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.358385 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.358396 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.461338 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.461380 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.461389 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.461404 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.461416 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.563542 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.563589 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.563600 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.563616 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.563626 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.666090 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.666157 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.666180 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.666208 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.666233 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.768553 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.768594 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.768605 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.768619 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.768628 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.870796 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.870835 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.870843 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.870857 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.870868 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.973851 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.973899 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.973907 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.973921 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:03 crc kubenswrapper[4940]: I1126 06:56:03.973930 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:03Z","lastTransitionTime":"2025-11-26T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.076891 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.076927 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.076935 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.076974 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.077018 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.164556 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.164556 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:04 crc kubenswrapper[4940]: E1126 06:56:04.164742 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:04 crc kubenswrapper[4940]: E1126 06:56:04.164799 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.180150 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.180225 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.180248 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.180281 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.180307 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.283464 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.283528 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.283547 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.283580 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.283601 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.386501 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.386559 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.386571 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.386593 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.386606 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.489830 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.489887 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.489899 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.489916 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.489936 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.592354 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.592523 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.592540 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.592555 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.592566 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.696185 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.696265 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.696279 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.696309 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.696374 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.799903 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.799999 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.800012 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.800099 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.800135 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.903112 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.903154 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.903163 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.903178 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:04 crc kubenswrapper[4940]: I1126 06:56:04.903187 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:04Z","lastTransitionTime":"2025-11-26T06:56:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.006653 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.006701 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.006710 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.006725 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.006735 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.109074 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.109122 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.109132 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.109149 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.109158 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.165486 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.165495 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.165718 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.165859 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.211968 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.212030 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.212080 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.212112 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.212137 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.315225 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.315408 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.315419 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.315449 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.315641 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.358011 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.358162 4940 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.358218 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs podName:b1a21dbc-3d52-4bc2-805b-65dc954babce nodeName:}" failed. No retries permitted until 2025-11-26 06:56:37.358202562 +0000 UTC m=+98.878344181 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs") pod "network-metrics-daemon-nfh6j" (UID: "b1a21dbc-3d52-4bc2-805b-65dc954babce") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.417965 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.417999 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.418009 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.418026 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.418053 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.520423 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.520477 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.520490 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.520512 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.520525 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.623897 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.624079 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.624112 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.624143 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.624165 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.726891 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.726974 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.727000 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.727034 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.727096 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.807439 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.807486 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.807501 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.807524 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.807541 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.824963 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.829032 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.829088 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.829097 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.829111 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.829120 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.847445 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.852524 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.852590 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.852607 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.852627 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.852640 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.864821 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.867969 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.868017 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.868028 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.868056 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.868066 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.880142 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.883388 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.883422 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.883436 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.883453 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.883466 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.900390 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:05 crc kubenswrapper[4940]: E1126 06:56:05.900557 4940 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.902102 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.902127 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.902141 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.902160 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:05 crc kubenswrapper[4940]: I1126 06:56:05.902172 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:05Z","lastTransitionTime":"2025-11-26T06:56:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.004818 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.004866 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.004880 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.004897 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.004910 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.107199 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.107261 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.107275 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.107296 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.107310 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.164665 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.164661 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:06 crc kubenswrapper[4940]: E1126 06:56:06.164808 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:06 crc kubenswrapper[4940]: E1126 06:56:06.164982 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.210584 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.210676 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.210694 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.210719 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.210735 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.312605 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.312642 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.312650 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.312667 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.312676 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.414888 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.414943 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.414961 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.414981 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.414996 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.517069 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.517115 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.517127 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.517147 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.517161 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.620714 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.620776 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.620795 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.620821 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.620838 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.724147 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.724195 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.724208 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.724226 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.724237 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.831515 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.831578 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.831604 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.831626 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.831641 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.934639 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.934675 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.934684 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.934698 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:06 crc kubenswrapper[4940]: I1126 06:56:06.934708 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:06Z","lastTransitionTime":"2025-11-26T06:56:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.037907 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.037980 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.037989 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.038005 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.038017 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.140890 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.140932 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.140943 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.140958 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.140970 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.165500 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.165574 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:07 crc kubenswrapper[4940]: E1126 06:56:07.165666 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:07 crc kubenswrapper[4940]: E1126 06:56:07.165807 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.243845 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.243933 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.243957 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.243985 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.244005 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.347078 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.347137 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.347149 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.347169 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.347182 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.450540 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.450593 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.450609 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.450631 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.450649 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.554005 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.554093 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.554110 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.554133 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.554151 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.632547 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gnvm5_c9ec0fa1-713d-4824-9a3a-a20eff8c65e0/kube-multus/0.log" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.632620 4940 generic.go:334] "Generic (PLEG): container finished" podID="c9ec0fa1-713d-4824-9a3a-a20eff8c65e0" containerID="4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb" exitCode=1 Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.632660 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gnvm5" event={"ID":"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0","Type":"ContainerDied","Data":"4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.633221 4940 scope.go:117] "RemoveContainer" containerID="4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.645993 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"16669bb5-486e-415d-a74e-79811d37124e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.661359 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.661745 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.661760 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.661784 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.661798 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.661431 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.721695 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e6
91b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.740610 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.757549 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.764739 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.764780 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.764789 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.764804 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.764814 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.774918 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.793548 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.809793 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.822473 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.834328 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.849017 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.859753 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.867182 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.867209 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.867219 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.867234 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.867244 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.874246 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.886511 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.896445 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.909615 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.927702 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.943994 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"2025-11-26T06:55:21+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6\\\\n2025-11-26T06:55:21+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6 to /host/opt/cni/bin/\\\\n2025-11-26T06:55:22Z [verbose] multus-daemon started\\\\n2025-11-26T06:55:22Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:56:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:07Z is after 2025-08-24T17:21:41Z"
Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.970295 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.970335 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.970347 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.970362 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:07 crc kubenswrapper[4940]: I1126 06:56:07.970373 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:07Z","lastTransitionTime":"2025-11-26T06:56:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.074003 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.074229 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.074259 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.074293 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.074317 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.165609 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.165629 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:56:08 crc kubenswrapper[4940]: E1126 06:56:08.165758 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:56:08 crc kubenswrapper[4940]: E1126 06:56:08.165967 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.166880 4940 scope.go:117] "RemoveContainer" containerID="714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.180549 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.180858 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.180999 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.181198 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.181360 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.185031 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.284102 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.284272 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.284363 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.284477 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.284607 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.386817 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.386849 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.386859 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.386874 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.386884 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.488615 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.488658 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.488670 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.488689 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.488702 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.590979 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.591021 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.591032 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.591063 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.591075 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.639154 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gnvm5_c9ec0fa1-713d-4824-9a3a-a20eff8c65e0/kube-multus/0.log"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.639274 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gnvm5" event={"ID":"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0","Type":"ContainerStarted","Data":"9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.641630 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/2.log"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.645976 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.646626 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.668113 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\
"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883a
d1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.693221 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b5
1bad35a1e112ca4b192b8a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.697900 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.697955 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.697968 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.697992 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.698006 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.710772 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"2025-11-26T06:55:21+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6\\\\n2025-11-26T06:55:21+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6 to /host/opt/cni/bin/\\\\n2025-11-26T06:55:22Z [verbose] multus-daemon started\\\\n2025-11-26T06:55:22Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:56:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.720530 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"061e23f2-3a10-4603-9cc7-b038e8aedc8d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3131c0c473bf5ea3e9ba041ef10dd564f1af25aa7c97b9ea2da10e9e98f8ef17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.733510 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.743715 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.762851 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"16669bb5-486e-415d-a74e-79811d37124e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.790705 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 
2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.800540 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.800604 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.800623 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.800644 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.800663 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.809153 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.822637 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.842112 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"start
edAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50
c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.855713 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.871760 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.885108 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.899984 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restar
tCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.902292 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.902404 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.902494 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.902586 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.902682 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:08Z","lastTransitionTime":"2025-11-26T06:56:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.911946 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.926346 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.938995 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.953635 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.966243 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"16669bb5-486e-415d-a74e-79811d37124e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.981508 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 
2025-08-24T17:21:41Z" Nov 26 06:56:08 crc kubenswrapper[4940]: I1126 06:56:08.992603 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:08Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.005085 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.005126 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.005137 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.005154 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.005165 4940 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.014457 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.028693 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.040864 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.056874 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.068572 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.078299 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.095333 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.108139 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.108177 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.108186 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.108204 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.108216 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.108724 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.122197 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.133260 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.152325 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8902f69ed070d02559afe9849d673a29415937a
d56fd3ec8890d636c36f714a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:56:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.164913 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.164928 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:09 crc kubenswrapper[4940]: E1126 06:56:09.165031 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:09 crc kubenswrapper[4940]: E1126 06:56:09.165327 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.165848 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"2025-11-26T06:55:21+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6\\\\n2025-11-26T06:55:21+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6 to /host/opt/cni/bin/\\\\n2025-11-26T06:55:22Z [verbose] multus-daemon started\\\\n2025-11-26T06:55:22Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:56:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.175865 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"061e23f2-3a10-4603-9cc7-b038e8aedc8d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3131c0c473bf5ea3e9ba041ef10dd564f1af25aa7c97b9ea2da10e9e98f8ef17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.189373 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.198498 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.210855 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.210894 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.210904 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.210919 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.210929 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.211619 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e889
0e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.221546 4940 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"16669bb5-486e-415d-a74e-79811d37124e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.233675 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.248128 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.258190 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.271250 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.281692 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.291180 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.300054 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.309103 4940 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e68488
81d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.313371 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.313427 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.313439 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.313461 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.313475 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.320835 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.334663 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.346424 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.358480 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 
06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.370686 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"2025-11-26T06:55:21+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6\\\\n2025-11-26T06:55:21+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6 to /host/opt/cni/bin/\\\\n2025-11-26T06:55:22Z [verbose] multus-daemon started\\\\n2025-11-26T06:55:22Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:56:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.381539 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"061e23f2-3a10-4603-9cc7-b038e8aedc8d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3131c0c473bf5ea3e9ba041ef10dd564f1af25aa7c97b9ea2da10e9e98f8ef17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.396554 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.408732 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.418831 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.418875 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.418887 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.418906 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.418920 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.424958 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e889
0e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.453088 4940 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:56:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.522264 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.522301 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.522310 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.522324 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.522334 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.624710 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.624767 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.624784 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.624807 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.624822 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.650538 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/3.log" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.651131 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/2.log" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.653626 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a" exitCode=1 Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.653679 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.653725 4940 scope.go:117] "RemoveContainer" containerID="714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.654504 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a" Nov 26 06:56:09 crc kubenswrapper[4940]: E1126 06:56:09.654833 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.668231 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"061e23f2-3a10-4603-9cc7-b038e8aedc8d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3131c0c473bf5ea3e9ba041ef10dd564f1af25aa7c97b9ea2da10e9e98f8ef17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.688683 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.700648 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.723456 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.726716 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.726743 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc 
kubenswrapper[4940]: I1126 06:56:09.726755 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.726771 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.726782 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.742378 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8902f69ed070d02559afe9849d673a29415937a
d56fd3ec8890d636c36f714a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://714d9107deb836d17784a570b881f2525cfed1b51bad35a1e112ca4b192b8a5d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:55:43Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:55:43.229681 6602 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 06:55:43.229716 6602 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 06:55:43.229723 6602 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 06:55:43.229743 6602 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 06:55:43.229744 6602 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 06:55:43.229754 6602 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 06:55:43.229775 6602 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 06:55:43.229809 6602 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 06:55:43.229821 6602 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 06:55:43.229844 6602 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 06:55:43.229849 6602 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 06:55:43.229850 6602 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 06:55:43.229864 6602 factory.go:656] Stopping watch factory\\\\nI1126 06:55:43.229865 6602 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 06:55:43.229879 6602 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:09Z\\\",\\\"message\\\":\\\"er: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:56:09.091824 6933 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/certified-operators]} name:Service_openshift-marketplace/certified-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} 
vips:{GoMap:map[10.217.5.214:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {20da2226-531c-4179-9810-aa4026995ca3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:56:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hos
tIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.754614 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"2025-11-26T06:55:21+00:00 [cnibincopy] Successfully copied files in 
/usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6\\\\n2025-11-26T06:55:21+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6 to /host/opt/cni/bin/\\\\n2025-11-26T06:55:22Z [verbose] multus-daemon started\\\\n2025-11-26T06:55:22Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:56:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.764095 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"16669bb5-486e-415d-a74e-79811d37124e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.775267 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 
2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.794465 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/
etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025
-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.805786 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.855201 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.855240 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.855251 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.855269 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.855280 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.858619 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.875735 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.885680 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.896464 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.905917 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.917990 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.933990 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.946668 4940 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.957178 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.957574 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" 
Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.957603 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.957613 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.957630 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:09 crc kubenswrapper[4940]: I1126 06:56:09.957642 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:09Z","lastTransitionTime":"2025-11-26T06:56:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.061410 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.061468 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.061485 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.061508 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.061526 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.164215 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.164286 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.164301 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.164322 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.164341 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.164487 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.164615 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:10 crc kubenswrapper[4940]: E1126 06:56:10.164769 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:10 crc kubenswrapper[4940]: E1126 06:56:10.164860 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.267510 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.267549 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.267558 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.267573 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.267582 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.371073 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.371162 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.371178 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.371200 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.371212 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.474579 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.474651 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.474666 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.474685 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.474697 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.577531 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.577599 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.577616 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.577641 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.577658 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.661991 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/3.log" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.667854 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a" Nov 26 06:56:10 crc kubenswrapper[4940]: E1126 06:56:10.668497 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.679826 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.679867 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.679880 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.679898 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.679911 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.703297 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.720523 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.737321 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.755702 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.770275 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.782323 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.782366 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.782385 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.782408 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.782426 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.785848 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.800976 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e6848881d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.815910 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.832312 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.847952 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.864271 4940 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.881699 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"2025-11-26T06:55:21+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6\\\\n2025-11-26T06:55:21+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6 to /host/opt/cni/bin/\\\\n2025-11-26T06:55:22Z [verbose] multus-daemon started\\\\n2025-11-26T06:55:22Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:56:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.884398 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.884490 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.884509 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.884533 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.884552 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.895739 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"061e23f2-3a10-4603-9cc7-b038e8aedc8d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3131c0c473bf5ea3e9ba041ef10dd564f1af25aa7c97b9ea2da10e9e98f8ef17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.912982 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.925200 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.943946 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.973571 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:09Z\\\",\\\"message\\\":\\\"er: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:56:09.091824 6933 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/certified-operators]} name:Service_openshift-marketplace/certified-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.214:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {20da2226-531c-4179-9810-aa4026995ca3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:56:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.986229 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"16669bb5-486e-415d-a74e-79811d37124e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.987302 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.987374 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.987392 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.987415 4940 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.987431 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:10Z","lastTransitionTime":"2025-11-26T06:56:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:10 crc kubenswrapper[4940]: I1126 06:56:10.998678 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.090134 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.090192 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.090206 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.090225 4940 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.090236 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:11Z","lastTransitionTime":"2025-11-26T06:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.165126 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.165126 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:11 crc kubenswrapper[4940]: E1126 06:56:11.165282 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:11 crc kubenswrapper[4940]: E1126 06:56:11.165332 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.192277 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.192323 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.192335 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.192357 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:11 crc kubenswrapper[4940]: I1126 06:56:11.192370 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:11Z","lastTransitionTime":"2025-11-26T06:56:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 06:56:12 crc kubenswrapper[4940]: I1126 06:56:12.165422 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:56:12 crc kubenswrapper[4940]: I1126 06:56:12.165448 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:56:12 crc kubenswrapper[4940]: E1126 06:56:12.165624 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:56:12 crc kubenswrapper[4940]: E1126 06:56:12.165792 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:56:13 crc kubenswrapper[4940]: I1126 06:56:13.165034 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:56:13 crc kubenswrapper[4940]: E1126 06:56:13.165276 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:56:13 crc kubenswrapper[4940]: I1126 06:56:13.165823 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:56:13 crc kubenswrapper[4940]: E1126 06:56:13.165965 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:56:14 crc kubenswrapper[4940]: I1126 06:56:14.164850 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:56:14 crc kubenswrapper[4940]: I1126 06:56:14.164925 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:56:14 crc kubenswrapper[4940]: E1126 06:56:14.165032 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:56:14 crc kubenswrapper[4940]: E1126 06:56:14.165327 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:56:15 crc kubenswrapper[4940]: I1126 06:56:15.165001 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:56:15 crc kubenswrapper[4940]: I1126 06:56:15.165122 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:56:15 crc kubenswrapper[4940]: E1126 06:56:15.165257 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:56:15 crc kubenswrapper[4940]: E1126 06:56:15.165451 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:56:16 crc kubenswrapper[4940]: E1126 06:56:16.089364 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:16Z is after 2025-08-24T17:21:41Z"
event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.095000 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.095026 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.095067 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: E1126 06:56:16.121648 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:16Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.127618 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.127688 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.127712 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.127745 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.127767 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: E1126 06:56:16.150190 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:16Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.157120 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.157221 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.157240 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.157295 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.157315 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.164650 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:16 crc kubenswrapper[4940]: E1126 06:56:16.164870 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.164984 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:16 crc kubenswrapper[4940]: E1126 06:56:16.165295 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:16 crc kubenswrapper[4940]: E1126 06:56:16.179631 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.185134 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.185156 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.185174 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: E1126 06:56:16.205503 4940 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9820c585-3300-4e94-8fad-73afaec61623\\\",\\\"systemUUID\\\":\\\"54d650ff-21de-4a69-b96e-f42595cf8fe0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:16Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:16 crc kubenswrapper[4940]: E1126 06:56:16.205741 4940 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.208083 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.208155 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.208182 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.208214 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.208238 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.311407 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.311496 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.311518 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.311548 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.311570 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.414180 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.414246 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.414264 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.414289 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.414307 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.517754 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.517834 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.517857 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.517890 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.517912 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.621414 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.621492 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.621518 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.621549 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.621572 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.724818 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.724866 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.724884 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.724910 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.724929 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.828671 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.828710 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.828728 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.828750 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.828768 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.930953 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.931031 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.931063 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.931086 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:16 crc kubenswrapper[4940]: I1126 06:56:16.931103 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:16Z","lastTransitionTime":"2025-11-26T06:56:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.034159 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.034224 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.034233 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.034253 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.034264 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.137844 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.137955 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.137982 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.138014 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.138083 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.165425 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.165548 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:17 crc kubenswrapper[4940]: E1126 06:56:17.165652 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:17 crc kubenswrapper[4940]: E1126 06:56:17.165846 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.241463 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.241526 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.241543 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.241567 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.241585 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.345009 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.345128 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.345156 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.345191 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.345213 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.448773 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.448870 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.448902 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.448937 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.448960 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.552248 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.552313 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.552330 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.552356 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.552376 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.655161 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.655238 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.655254 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.655277 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.655294 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.757317 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.757375 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.757391 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.757412 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.757428 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.860575 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.860651 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.860676 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.860707 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.860730 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.964504 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.964571 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.964589 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.964613 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:17 crc kubenswrapper[4940]: I1126 06:56:17.964634 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:17Z","lastTransitionTime":"2025-11-26T06:56:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.073140 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.073208 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.073231 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.073259 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.073287 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.165431 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.165445 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:18 crc kubenswrapper[4940]: E1126 06:56:18.165648 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:18 crc kubenswrapper[4940]: E1126 06:56:18.165882 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.177301 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.177360 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.177381 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.177409 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.177441 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.280942 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.281013 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.281033 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.281131 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.281157 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.384664 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.384733 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.384743 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.384782 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.384797 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.488381 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.488438 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.488457 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.488483 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.488537 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.591868 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.591938 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.591958 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.591984 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.592003 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.696112 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.696153 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.696164 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.696177 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.696350 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.799785 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.799831 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.799847 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.799869 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.799887 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.903024 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.903116 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.903133 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.903162 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:18 crc kubenswrapper[4940]: I1126 06:56:18.903179 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:18Z","lastTransitionTime":"2025-11-26T06:56:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.007190 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.007248 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.007259 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.007296 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.007307 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.127651 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.127740 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.127765 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.127801 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.127825 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.165001 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.165111 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:19 crc kubenswrapper[4940]: E1126 06:56:19.165250 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:19 crc kubenswrapper[4940]: E1126 06:56:19.165343 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.195800 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"81e88d34-4756-496e-b6ba-1223c57f8c60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f79114f1fb468391a5bfa887aea7916f448223e521942ce48186f231a47bd4dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9987ab02a76f62636494b63462e11b2362251e87ee00799886db7ef506a95ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2a9d51607e16253b6c5f78d675261b643
403cb3281d6dfa9b50701301ab113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b36f6e5ab2b522248ce22ec78d717b3bd191e691b220449cff176f3a1a2b000\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fae27d2ce5546a7cdd9110a64b0467d1e4fbd705dbd6a10a27dc1d39e7bc62d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1897dbad0b0711310b775f99f5cb8bbc3825f6c88c3fa057a5c49ec88e01c6e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f1e7682fae284e789b50c0592464af5ad1b9e6bab7ee9a1473cbd60643d5750\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d5f3eec5b761c2880489fe950637ad137e8c2105e4c743412de2e464cb7d32dd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.209789 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.221763 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ece8f4c4fb79a634bc9e615cbd4b05436a2e9bb80a753853ecc4646bd6ebfe8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2ec8c108f9c73a921c5cc1f14243166a5a67690de95269413cd8bd375434aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.230388 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.230449 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.230467 4940 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.230495 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.230514 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.234416 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.246259 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-h7pkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ea5d7d28-22c3-4381-8206-f4853eca74cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b755ad236fb1b41d3265d867f168bf34f7ad3c64d33267e15cdacc586f41c200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8kbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-h7pkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.259473 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b1a21dbc-3d52-4bc2-805b-65dc954babce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnd9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:33Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nfh6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.275889 4940 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff9f7028-e500-45bf-930e-c12e723bd75b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800cf78ee42be00bfb5657243d7d3505f358fc46260312acdec370ffa0ab649a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee0f04c13089174de98427ad4cd255a75e39296e37677477a3153a1ba6036df6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://663780484cdc7ac64f350a3189ef2fe5c1cbcc5695c9e68488
81d035c48548b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.288575 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df78e7acb3883d7a6db3b7fa87ae406b82b923b6a74a6742c8cbc95e1c4f3cbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.300433 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.311677 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7642fd3eb3565ed334f692444f212653a3ed69b2adb810ce5b2b4f66dcf296b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nptlg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kbfvm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.322834 4940 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f62b1af5-c3ca-4cda-be70-b71166bd9552\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://431db5b29088231d4fdc7bd59ee5f1d07ec4c6cd0855bd7607ed12b931977468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c9fbb7a33bd1e985fc902a94341c22374824b122c66c227cdc65f65636c2415\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzg4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:31Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7v8p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.333789 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.333831 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.333840 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.333856 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.333866 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.337624 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gnvm5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:07Z\\\",\\\"message\\\":\\\"2025-11-26T06:55:21+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6\\\\n2025-11-26T06:55:21+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_868f98c2-bc91-4bcc-88f4-e9c147aba9e6 to /host/opt/cni/bin/\\\\n2025-11-26T06:55:22Z [verbose] multus-daemon started\\\\n2025-11-26T06:55:22Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:56:07Z [error] have you 
checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5299\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gnvm5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.348377 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"061e23f2-3a10-4603-9cc7-b038e8aedc8d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3131c0c473bf5ea3e9ba041ef10dd564f1af25aa7c97b9ea2da10e9e98f8ef17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a33e80caf28d98f990f5a37ff5c4ca6f37b3165cdb2eab5a8c9c2207149abf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.366694 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d760c346-dd1e-45e8-ae78-b53338b7d1eb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:55:18.841561 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:55:18.841701 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:55:18.842595 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3442004484/tls.crt::/tmp/serving-cert-3442004484/tls.key\\\\\\\"\\\\nI1126 06:55:19.149112 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:55:19.154522 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:55:19.154560 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:55:19.154592 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:55:19.154602 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:55:19.163617 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:55:19.163745 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163781 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:55:19.163806 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:55:19.163828 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:55:19.163855 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:55:19.163883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:55:19.163662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:55:19.165347 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:02Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.381427 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kfhtm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a323006-557b-442b-82ce-595f2f77b1f2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://444a43077a79102dafac263f3858d2713283ffdf96358ee57387b02e6cfe7749\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5jblj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kfhtm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.411907 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f0d793-d867-4295-a64f-bfbcb7ad8322\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cec23f4ee5a08d9627e623a3a92aa8c73a86887a0bde9aa1923721bcde48320\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29046649b38888ec2c86a9b96a0ff90b77e3d33986768cd55cd766d0d35f24c9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36d1e8890e68eeb502db068b68732ca01f9da220abb08cbc301666a612153b98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47edab60acd5e4b6cd8f7b3b8c22a686738eaede08dba6213a3ddee1d735d295\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://780f2032044ca73104579b15ea403569a2eb96cc23eb74e0f433ca0b969f0925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bcb43bf012591783396efd14220121cfdb3ed66d4f5536e4047acc78af47a26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://481c536293858fe59c883ad1e6fa87d56843eebb631366e648b85d361d22b146\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2k2wx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-x5j9z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.438160 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.438224 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc 
kubenswrapper[4940]: I1126 06:56:19.438242 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.438268 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.438286 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.439215 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c1adb3-d9e7-4302-89d2-60745597f2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8902f69ed070d02559afe9849d673a29415937a
d56fd3ec8890d636c36f714a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:56:09Z\\\",\\\"message\\\":\\\"er: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:09Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:56:09.091824 6933 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-marketplace/certified-operators]} name:Service_openshift-marketplace/certified-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.214:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {20da2226-531c-4179-9810-aa4026995ca3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:56:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmgxv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:55:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lj789\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.456612 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"16669bb5-486e-415d-a74e-79811d37124e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:54:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e35351bb00a4e3967b8e231abefe4fd7dcebeeaec4134fa9c2c64736c4c571d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3adb44cf44910cf135dc82e93c01bb562333343454ab097c8cb104f6389f05c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://46bb7ff4d492f5a8823382a2d9f29f363dfd55c670fda0036fd3a68aa3c577fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f48ab2599c7dd2828c1cb28a820101f074ba946283f10ba8ab4548dd6b18332\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:55:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:55:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:54:59Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.477378 4940 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:55:20Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e10ea2663f42d83168cf554323158b9039dcd2475350e3a1be01de5cb1f1e9a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:55:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:56:19Z is after 2025-08-24T17:21:41Z" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.540815 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.540849 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.540860 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.540876 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.540888 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.643680 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.643753 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.643785 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.643817 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.643841 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.746786 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.746867 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.746889 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.746917 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.746940 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.850083 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.850176 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.850206 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.850239 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.850262 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.953827 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.953903 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.953927 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.953956 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:19 crc kubenswrapper[4940]: I1126 06:56:19.953978 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:19Z","lastTransitionTime":"2025-11-26T06:56:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.057970 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.058107 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.058129 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.058200 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.058219 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.162705 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.162804 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.162821 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.162846 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.162865 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.165353 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.165358 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:20 crc kubenswrapper[4940]: E1126 06:56:20.165588 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:20 crc kubenswrapper[4940]: E1126 06:56:20.165735 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.266514 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.266587 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.266604 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.266627 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.266644 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.369949 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.369998 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.370015 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.370071 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.370092 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.472207 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.472251 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.472264 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.472282 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.472297 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.575863 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.575930 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.575954 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.575984 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.576006 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.679289 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.679374 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.679395 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.679422 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.679439 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.782283 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.782316 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.782325 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.782352 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.782361 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.885016 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.885105 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.885132 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.885160 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.885177 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.988463 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.988521 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.988539 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.988563 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:20 crc kubenswrapper[4940]: I1126 06:56:20.988581 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:20Z","lastTransitionTime":"2025-11-26T06:56:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.092011 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.092103 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.092123 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.092148 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.092165 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.165524 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:21 crc kubenswrapper[4940]: E1126 06:56:21.165720 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.165564 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:21 crc kubenswrapper[4940]: E1126 06:56:21.166156 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.194457 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.194530 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.194556 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.194585 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.194607 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.296943 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.297002 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.297019 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.297075 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.297099 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.400733 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.400785 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.400802 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.400826 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.400844 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.504998 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.505122 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.505161 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.505192 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.505219 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.607758 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.607814 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.607832 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.607854 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.607871 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.710922 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.710992 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.711015 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.711073 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.711098 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.813989 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.814078 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.814102 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.814129 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.814243 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.916909 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.917318 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.917520 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.917725 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:21 crc kubenswrapper[4940]: I1126 06:56:21.917941 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:21Z","lastTransitionTime":"2025-11-26T06:56:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.021269 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.021639 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.021825 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.022132 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.022335 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.126284 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.126347 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.126365 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.126388 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.126407 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.165255 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.165256 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:22 crc kubenswrapper[4940]: E1126 06:56:22.165828 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:22 crc kubenswrapper[4940]: E1126 06:56:22.165980 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.229458 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.229523 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.229548 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.229582 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.229606 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.332410 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.332470 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.332496 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.332519 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.332535 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.435973 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.436072 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.436098 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.436123 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.436143 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.540117 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.540196 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.540219 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.540248 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.540273 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.643261 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.643529 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.643557 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.643589 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.643731 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.746830 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.746905 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.746921 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.746949 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.746969 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.849813 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.849858 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.849868 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.849889 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.849902 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.953643 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.953709 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.953777 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.953807 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:22 crc kubenswrapper[4940]: I1126 06:56:22.953823 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:22Z","lastTransitionTime":"2025-11-26T06:56:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.056676 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.056746 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.056761 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.056784 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.056806 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.072569 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.072722 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.072770 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.072857 4940 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.072876 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:27.072830216 +0000 UTC m=+148.592971865 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.072928 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:57:27.072912039 +0000 UTC m=+148.593053698 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.073000 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.073113 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073165 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073230 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073240 4940 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073248 4940 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073305 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:57:27.07327765 +0000 UTC m=+148.593419309 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073353 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:57:27.073318112 +0000 UTC m=+148.593459731 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073506 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073538 4940 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073559 4940 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.073636 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:57:27.07361791 +0000 UTC m=+148.593759559 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.160607 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.160729 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.160747 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.160771 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.160788 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.165309 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.165453 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.165533 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:23 crc kubenswrapper[4940]: E1126 06:56:23.165649 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.263120 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.263179 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.263195 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.263219 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.263236 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.367400 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.367460 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.367477 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.367501 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.367520 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.470626 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.470694 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.470712 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.470738 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.470757 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.576398 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.576456 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.576474 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.576500 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.576518 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.679671 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.679771 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.679799 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.679829 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.679852 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.782497 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.782555 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.782566 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.782583 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.782593 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.885323 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.885368 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.885380 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.885399 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.885418 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.988847 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.988915 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.988939 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.988974 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:23 crc kubenswrapper[4940]: I1126 06:56:23.988997 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:23Z","lastTransitionTime":"2025-11-26T06:56:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.092527 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.092612 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.092633 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.092662 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.092680 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.165380 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.165483 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:24 crc kubenswrapper[4940]: E1126 06:56:24.165649 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:24 crc kubenswrapper[4940]: E1126 06:56:24.165814 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.195985 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.196116 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.196144 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.196178 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.196196 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.300076 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.300151 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.300169 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.300200 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.300227 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.403918 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.404009 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.404068 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.404103 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.404123 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.506627 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.506705 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.506728 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.506755 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.506774 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.609794 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.609854 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.609867 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.609890 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.609907 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.715236 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.715332 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.715366 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.715394 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.715417 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.818706 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.818800 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.818824 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.818857 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.818880 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.922277 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.922361 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.922386 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.922430 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:24 crc kubenswrapper[4940]: I1126 06:56:24.922460 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:24Z","lastTransitionTime":"2025-11-26T06:56:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.026320 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.026406 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.026432 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.026471 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.026496 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.128911 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.128998 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.129017 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.129078 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.129101 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.164676 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.164976 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:25 crc kubenswrapper[4940]: E1126 06:56:25.165349 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:25 crc kubenswrapper[4940]: E1126 06:56:25.165962 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.166559 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a" Nov 26 06:56:25 crc kubenswrapper[4940]: E1126 06:56:25.166863 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.232617 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.232684 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.232704 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.232730 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.232749 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.336420 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.336484 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.336499 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.336531 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.336548 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.439881 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.439945 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.439957 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.439978 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.439999 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.543330 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.543387 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.543406 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.543441 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.543458 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.646754 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.646801 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.646817 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.646840 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.646855 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.749189 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.749234 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.749250 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.749272 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.749286 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.851959 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.852020 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.852034 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.852115 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.852133 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.955932 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.956315 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.956333 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.956356 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:25 crc kubenswrapper[4940]: I1126 06:56:25.956375 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:25Z","lastTransitionTime":"2025-11-26T06:56:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.060573 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.060627 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.060640 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.060657 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.060672 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:26Z","lastTransitionTime":"2025-11-26T06:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.163295 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.163367 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.163392 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.163425 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.163449 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:26Z","lastTransitionTime":"2025-11-26T06:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.164784 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.164868 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:26 crc kubenswrapper[4940]: E1126 06:56:26.165158 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:26 crc kubenswrapper[4940]: E1126 06:56:26.165248 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.266073 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.266119 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.266130 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.266149 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.266161 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:26Z","lastTransitionTime":"2025-11-26T06:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.369143 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.369204 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.369235 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.369256 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.369269 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:26Z","lastTransitionTime":"2025-11-26T06:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.413442 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.413506 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.413522 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.413545 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.413560 4940 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:56:26Z","lastTransitionTime":"2025-11-26T06:56:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.484502 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb"]
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.485431 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.488657 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.488951 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.489159 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.492147 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.559152 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podStartSLOduration=67.559128059 podStartE2EDuration="1m7.559128059s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.542587212 +0000 UTC m=+88.062728851" watchObservedRunningTime="2025-11-26 06:56:26.559128059 +0000 UTC m=+88.079269678"
Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.577423 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7v8p4" podStartSLOduration=66.577406963 podStartE2EDuration="1m6.577406963s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.559391078 +0000 UTC m=+88.079532697" watchObservedRunningTime="2025-11-26 06:56:26.577406963 +0000 UTC 
m=+88.097548582" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.610733 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=68.610705774 podStartE2EDuration="1m8.610705774s" podCreationTimestamp="2025-11-26 06:55:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.61028221 +0000 UTC m=+88.130423839" watchObservedRunningTime="2025-11-26 06:56:26.610705774 +0000 UTC m=+88.130847413" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.615911 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1faead1d-d458-48ac-a793-5921e6e6c53e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.615957 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1faead1d-d458-48ac-a793-5921e6e6c53e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.615982 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1faead1d-d458-48ac-a793-5921e6e6c53e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.616004 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1faead1d-d458-48ac-a793-5921e6e6c53e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.616164 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1faead1d-d458-48ac-a793-5921e6e6c53e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.644711 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=67.644694128 podStartE2EDuration="1m7.644694128s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.643921463 +0000 UTC m=+88.164063092" watchObservedRunningTime="2025-11-26 06:56:26.644694128 +0000 UTC m=+88.164835757" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.694768 4940 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-dns/node-resolver-kfhtm" podStartSLOduration=67.694745913 podStartE2EDuration="1m7.694745913s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.66672533 +0000 UTC m=+88.186866959" watchObservedRunningTime="2025-11-26 06:56:26.694745913 +0000 UTC m=+88.214887532" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.694943 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-x5j9z" podStartSLOduration=67.694938779 podStartE2EDuration="1m7.694938779s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.694748633 +0000 UTC m=+88.214890262" watchObservedRunningTime="2025-11-26 06:56:26.694938779 +0000 UTC m=+88.215080398" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.717096 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1faead1d-d458-48ac-a793-5921e6e6c53e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.717130 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1faead1d-d458-48ac-a793-5921e6e6c53e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.717147 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1faead1d-d458-48ac-a793-5921e6e6c53e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.717165 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1faead1d-d458-48ac-a793-5921e6e6c53e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.717233 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1faead1d-d458-48ac-a793-5921e6e6c53e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.717291 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1faead1d-d458-48ac-a793-5921e6e6c53e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.717497 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1faead1d-d458-48ac-a793-5921e6e6c53e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.718245 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1faead1d-d458-48ac-a793-5921e6e6c53e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.726231 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1faead1d-d458-48ac-a793-5921e6e6c53e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.735837 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1faead1d-d458-48ac-a793-5921e6e6c53e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-hqntb\" (UID: \"1faead1d-d458-48ac-a793-5921e6e6c53e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.763726 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-gnvm5" podStartSLOduration=67.763700701 podStartE2EDuration="1m7.763700701s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.749891831 +0000 UTC m=+88.270033460" watchObservedRunningTime="2025-11-26 06:56:26.763700701 +0000 UTC m=+88.283842330" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.764583 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=18.764574609 podStartE2EDuration="18.764574609s" podCreationTimestamp="2025-11-26 06:56:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.763898327 +0000 UTC m=+88.284039946" watchObservedRunningTime="2025-11-26 06:56:26.764574609 +0000 UTC m=+88.284716238" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.795327 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=33.795298178 podStartE2EDuration="33.795298178s" podCreationTimestamp="2025-11-26 06:55:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.793475931 +0000 UTC m=+88.313617550" watchObservedRunningTime="2025-11-26 06:56:26.795298178 +0000 UTC m=+88.315439817" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.806824 4940 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.864555 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-h7pkm" podStartSLOduration=67.864521945 podStartE2EDuration="1m7.864521945s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.863437921 +0000 UTC m=+88.383579550" watchObservedRunningTime="2025-11-26 06:56:26.864521945 +0000 UTC m=+88.384663564" Nov 26 06:56:26 crc kubenswrapper[4940]: I1126 06:56:26.894160 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=64.894136519 podStartE2EDuration="1m4.894136519s" podCreationTimestamp="2025-11-26 06:55:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:26.893317153 +0000 UTC m=+88.413458862" watchObservedRunningTime="2025-11-26 06:56:26.894136519 +0000 UTC m=+88.414278158" Nov 26 06:56:27 crc kubenswrapper[4940]: I1126 06:56:27.165283 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:27 crc kubenswrapper[4940]: E1126 06:56:27.165412 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:27 crc kubenswrapper[4940]: I1126 06:56:27.165620 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:27 crc kubenswrapper[4940]: E1126 06:56:27.165683 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:27 crc kubenswrapper[4940]: I1126 06:56:27.730951 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" event={"ID":"1faead1d-d458-48ac-a793-5921e6e6c53e","Type":"ContainerStarted","Data":"8c51b709ff011b696debe30e26593450a1c12aec594b1e10925318c55914d4cc"} Nov 26 06:56:27 crc kubenswrapper[4940]: I1126 06:56:27.731020 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" event={"ID":"1faead1d-d458-48ac-a793-5921e6e6c53e","Type":"ContainerStarted","Data":"1a6ecb5c1d39df2e41e51eeb044e1ed1663e62b143eac541502d26c1b7afe77f"} Nov 26 06:56:27 crc kubenswrapper[4940]: I1126 06:56:27.747308 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-hqntb" podStartSLOduration=68.747282035 podStartE2EDuration="1m8.747282035s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:27.746244122 +0000 UTC m=+89.266385761" watchObservedRunningTime="2025-11-26 06:56:27.747282035 +0000 UTC m=+89.267423664" Nov 26 06:56:28 crc kubenswrapper[4940]: I1126 06:56:28.165633 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:28 crc kubenswrapper[4940]: I1126 06:56:28.165754 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:28 crc kubenswrapper[4940]: E1126 06:56:28.166008 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:28 crc kubenswrapper[4940]: E1126 06:56:28.166144 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:29 crc kubenswrapper[4940]: I1126 06:56:29.165196 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:29 crc kubenswrapper[4940]: I1126 06:56:29.165278 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:29 crc kubenswrapper[4940]: E1126 06:56:29.169191 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:29 crc kubenswrapper[4940]: E1126 06:56:29.169329 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:30 crc kubenswrapper[4940]: I1126 06:56:30.165165 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:30 crc kubenswrapper[4940]: I1126 06:56:30.165181 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:30 crc kubenswrapper[4940]: E1126 06:56:30.165323 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:30 crc kubenswrapper[4940]: E1126 06:56:30.165446 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:31 crc kubenswrapper[4940]: I1126 06:56:31.165389 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:31 crc kubenswrapper[4940]: I1126 06:56:31.165389 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:31 crc kubenswrapper[4940]: E1126 06:56:31.165605 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:31 crc kubenswrapper[4940]: E1126 06:56:31.165729 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:32 crc kubenswrapper[4940]: I1126 06:56:32.165298 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:32 crc kubenswrapper[4940]: I1126 06:56:32.165313 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:32 crc kubenswrapper[4940]: E1126 06:56:32.165474 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:32 crc kubenswrapper[4940]: E1126 06:56:32.165677 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:33 crc kubenswrapper[4940]: I1126 06:56:33.165702 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:33 crc kubenswrapper[4940]: E1126 06:56:33.165898 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:33 crc kubenswrapper[4940]: I1126 06:56:33.165695 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:33 crc kubenswrapper[4940]: E1126 06:56:33.166297 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:34 crc kubenswrapper[4940]: I1126 06:56:34.165254 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:34 crc kubenswrapper[4940]: E1126 06:56:34.165685 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:34 crc kubenswrapper[4940]: I1126 06:56:34.165306 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:34 crc kubenswrapper[4940]: E1126 06:56:34.165922 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:35 crc kubenswrapper[4940]: I1126 06:56:35.164952 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:35 crc kubenswrapper[4940]: I1126 06:56:35.164995 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:35 crc kubenswrapper[4940]: E1126 06:56:35.165225 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:35 crc kubenswrapper[4940]: E1126 06:56:35.165466 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:36 crc kubenswrapper[4940]: I1126 06:56:36.165448 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:36 crc kubenswrapper[4940]: I1126 06:56:36.165496 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:36 crc kubenswrapper[4940]: E1126 06:56:36.166224 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:36 crc kubenswrapper[4940]: E1126 06:56:36.166419 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:36 crc kubenswrapper[4940]: I1126 06:56:36.167229 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a" Nov 26 06:56:36 crc kubenswrapper[4940]: E1126 06:56:36.167523 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lj789_openshift-ovn-kubernetes(69c1adb3-d9e7-4302-89d2-60745597f2cc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" Nov 26 06:56:37 crc kubenswrapper[4940]: I1126 06:56:37.164788 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:37 crc kubenswrapper[4940]: E1126 06:56:37.165435 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:37 crc kubenswrapper[4940]: I1126 06:56:37.164912 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:37 crc kubenswrapper[4940]: E1126 06:56:37.166975 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:37 crc kubenswrapper[4940]: I1126 06:56:37.440999 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:37 crc kubenswrapper[4940]: E1126 06:56:37.441226 4940 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:56:37 crc kubenswrapper[4940]: E1126 06:56:37.441342 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs podName:b1a21dbc-3d52-4bc2-805b-65dc954babce nodeName:}" failed. No retries permitted until 2025-11-26 06:57:41.441312777 +0000 UTC m=+162.961454426 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs") pod "network-metrics-daemon-nfh6j" (UID: "b1a21dbc-3d52-4bc2-805b-65dc954babce") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:56:38 crc kubenswrapper[4940]: I1126 06:56:38.164543 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j"
Nov 26 06:56:38 crc kubenswrapper[4940]: E1126 06:56:38.164707 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce"
Nov 26 06:56:38 crc kubenswrapper[4940]: I1126 06:56:38.164543 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:56:38 crc kubenswrapper[4940]: E1126 06:56:38.165126 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:56:51 crc kubenswrapper[4940]: I1126 06:56:51.165694 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:56:51 crc kubenswrapper[4940]: I1126 06:56:51.166515 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:56:51 crc kubenswrapper[4940]: E1126 06:56:51.166681 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:56:51 crc kubenswrapper[4940]: E1126 06:56:51.167192 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:51 crc kubenswrapper[4940]: I1126 06:56:51.167216 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a" Nov 26 06:56:51 crc kubenswrapper[4940]: I1126 06:56:51.822330 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/3.log" Nov 26 06:56:51 crc kubenswrapper[4940]: I1126 06:56:51.826643 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerStarted","Data":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"} Nov 26 06:56:51 crc kubenswrapper[4940]: I1126 06:56:51.827418 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:56:51 crc kubenswrapper[4940]: I1126 06:56:51.865707 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podStartSLOduration=92.865682267 podStartE2EDuration="1m32.865682267s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:51.865223072 +0000 UTC m=+113.385364751" watchObservedRunningTime="2025-11-26 06:56:51.865682267 +0000 UTC m=+113.385823896" Nov 26 06:56:52 crc kubenswrapper[4940]: I1126 06:56:52.164947 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:52 crc kubenswrapper[4940]: I1126 06:56:52.164947 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:52 crc kubenswrapper[4940]: E1126 06:56:52.165635 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:52 crc kubenswrapper[4940]: E1126 06:56:52.165650 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:52 crc kubenswrapper[4940]: I1126 06:56:52.229413 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-nfh6j"] Nov 26 06:56:52 crc kubenswrapper[4940]: I1126 06:56:52.830656 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:52 crc kubenswrapper[4940]: E1126 06:56:52.830887 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:53 crc kubenswrapper[4940]: I1126 06:56:53.167278 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:53 crc kubenswrapper[4940]: I1126 06:56:53.167330 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:53 crc kubenswrapper[4940]: E1126 06:56:53.167487 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:56:53 crc kubenswrapper[4940]: E1126 06:56:53.167640 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.164510 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.164635 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:54 crc kubenswrapper[4940]: E1126 06:56:54.164679 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nfh6j" podUID="b1a21dbc-3d52-4bc2-805b-65dc954babce" Nov 26 06:56:54 crc kubenswrapper[4940]: E1126 06:56:54.164831 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.505665 4940 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.505817 4940 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.559445 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-bflcx"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.560227 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.561400 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.562197 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.567181 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.569722 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.570612 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.571842 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.571947 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.571847 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.571847 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.575032 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.576460 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.576584 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.577343 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.578424 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.578624 4940 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.578771 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.579185 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9f8vx"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.579285 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.579505 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.579757 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.579513 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.579925 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.579988 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.580436 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.580541 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t5zc5"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.580754 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.583054 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ft8qp"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.583342 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.584439 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.584973 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.585268 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.587023 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-z9v5r"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.587538 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.602762 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.603211 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.603287 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.603496 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-gwrr8"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.610826 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.610928 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.610837 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.612692 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.612710 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.613245 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.614337 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.614493 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.615169 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.616144 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.616795 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.617605 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.617671 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.617747 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.617935 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618125 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618192 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.617671 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618345 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618464 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618479 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2kv29"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618653 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618718 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618964 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.618995 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.619227 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.619992 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.624310 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.624480 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.624476 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626303 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626364 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626473 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626655 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626693 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626778 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626848 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626880 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626957 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.626986 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.627059 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.627128 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.627136 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.627477 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.628350 4940 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.628602 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.629005 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.629989 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9qfvj"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.630523 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.630760 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-jpvjs"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.630876 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.631131 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.633134 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.634021 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.634032 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.634232 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-58znw"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.634265 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.634425 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.634471 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.634739 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.634927 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.635009 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.635916 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.636321 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.637232 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.638253 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.646826 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.649488 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.649824 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.650174 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.660525 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.666945 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.667195 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.667324 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.667428 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.667631 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.667814 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.667922 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.668402 4940 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.668834 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.669009 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.669209 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.669382 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.672915 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673460 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673492 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673498 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/40c806a8-67f5-497d-91d8-6ce10f60e79a-images\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673526 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rblz4\" (UniqueName: \"kubernetes.io/projected/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-kube-api-access-rblz4\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673548 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/60b8d39a-f9d8-423e-9a88-2842a04d96a4-node-pullsecrets\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673567 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67c31318-36b0-451f-b849-8859adaebd3f-serving-cert\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673588 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-ca\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673609 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6566c85-0449-4bdc-b9ce-3fb02da651dd-config\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673635 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-serving-cert\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673656 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2ac8adae-3926-4197-a3ff-2efa92ef82b5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-58znw\" (UID: \"2ac8adae-3926-4197-a3ff-2efa92ef82b5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673677 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-image-import-ca\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673710 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/870f8bfa-3583-43fd-b298-2f96c5a74dd4-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: \"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673736 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673756 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6566c85-0449-4bdc-b9ce-3fb02da651dd-trusted-ca\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673781 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96mrw\" (UniqueName: 
\"kubernetes.io/projected/c6566c85-0449-4bdc-b9ce-3fb02da651dd-kube-api-access-96mrw\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673807 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e93e70f9-30a0-4254-8cc9-2988b9028297-serving-cert\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673828 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-machine-approver-tls\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673850 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9135b1d0-1179-43b5-ae25-937d636e58dd-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673873 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9135b1d0-1179-43b5-ae25-937d636e58dd-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673896 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673920 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673944 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/67c31318-36b0-451f-b849-8859adaebd3f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673965 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92684e7e-cfae-421b-b356-5cb5d6b38879-serving-cert\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.673984 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6cht\" (UniqueName: \"kubernetes.io/projected/e93e70f9-30a0-4254-8cc9-2988b9028297-kube-api-access-t6cht\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674002 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-encryption-config\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674013 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-sb94j"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674020 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-config\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674058 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/9579b457-bdfb-49b5-b12a-c14e87593922-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674080 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9579b457-bdfb-49b5-b12a-c14e87593922-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674105 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674127 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8shw\" (UniqueName: \"kubernetes.io/projected/92684e7e-cfae-421b-b356-5cb5d6b38879-kube-api-access-p8shw\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674149 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40c806a8-67f5-497d-91d8-6ce10f60e79a-config\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674170 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674190 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-client-ca\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674212 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm6cj\" (UniqueName: \"kubernetes.io/projected/7455a54f-ad8c-4bbb-8b56-4e68848ff102-kube-api-access-dm6cj\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674232 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-audit\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674265 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdk26\" (UniqueName: \"kubernetes.io/projected/60b8d39a-f9d8-423e-9a88-2842a04d96a4-kube-api-access-gdk26\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674292 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674288 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjwpg\" (UniqueName: \"kubernetes.io/projected/870f8bfa-3583-43fd-b298-2f96c5a74dd4-kube-api-access-sjwpg\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: \"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674324 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674615 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674779 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.674959 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-sb94j" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675121 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675142 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675161 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c6566c85-0449-4bdc-b9ce-3fb02da651dd-serving-cert\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675190 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ffea777-f433-4e58-a670-8f39f3db893e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675205 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69svt\" (UniqueName: \"kubernetes.io/projected/02eb3fbf-9bcf-4097-80da-07430ae0cceb-kube-api-access-69svt\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675223 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-dir\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675236 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675310 4940 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675237 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-client-ca\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675395 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675639 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675692 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675977 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676142 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676250 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.675652 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/870f8bfa-3583-43fd-b298-2f96c5a74dd4-config\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: \"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676374 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676402 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676428 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ffea777-f433-4e58-a670-8f39f3db893e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676450 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-audit-policies\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676472 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-service-ca\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676494 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-oauth-serving-cert\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676515 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9135b1d0-1179-43b5-ae25-937d636e58dd-config\") pod \"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676542 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676563 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-serving-cert\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676583 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-service-ca\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676606 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5-metrics-tls\") pod \"dns-operator-744455d44c-9qfvj\" (UID: \"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5\") " pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676627 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-config\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676646 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-etcd-client\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676677 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676700 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/40c806a8-67f5-497d-91d8-6ce10f60e79a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676728 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2frlg\" (UniqueName: \"kubernetes.io/projected/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-kube-api-access-2frlg\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676748 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-serving-cert\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676773 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k4rs\" (UniqueName: \"kubernetes.io/projected/831a57b7-e553-4c53-a658-b10d4183d514-kube-api-access-5k4rs\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676796 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65kxl\" (UniqueName: \"kubernetes.io/projected/40c806a8-67f5-497d-91d8-6ce10f60e79a-kube-api-access-65kxl\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676818 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/60b8d39a-f9d8-423e-9a88-2842a04d96a4-audit-dir\") pod 
\"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676843 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnrnc\" (UniqueName: \"kubernetes.io/projected/0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5-kube-api-access-jnrnc\") pod \"dns-operator-744455d44c-9qfvj\" (UID: \"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5\") " pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676863 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02eb3fbf-9bcf-4097-80da-07430ae0cceb-serving-cert\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676885 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-etcd-client\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676907 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-trusted-ca-bundle\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676929 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qhrc\" (UniqueName: \"kubernetes.io/projected/2ac8adae-3926-4197-a3ff-2efa92ef82b5-kube-api-access-6qhrc\") pod \"multus-admission-controller-857f4d67dd-58znw\" (UID: \"2ac8adae-3926-4197-a3ff-2efa92ef82b5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676950 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkr82\" (UniqueName: \"kubernetes.io/projected/9579b457-bdfb-49b5-b12a-c14e87593922-kube-api-access-xkr82\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.676995 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-service-ca-bundle\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677017 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7455a54f-ad8c-4bbb-8b56-4e68848ff102-serving-cert\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: 
\"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677055 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-oauth-config\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677079 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-auth-proxy-config\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677103 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-audit-dir\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677125 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppnnw\" (UniqueName: \"kubernetes.io/projected/67c31318-36b0-451f-b849-8859adaebd3f-kube-api-access-ppnnw\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677145 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-config\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677169 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9579b457-bdfb-49b5-b12a-c14e87593922-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677192 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-client\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677215 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-encryption-config\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677236 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-etcd-serving-ca\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677257 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-config\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677280 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-config\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677304 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w7fd\" (UniqueName: \"kubernetes.io/projected/7bd56d38-cc74-420e-ab79-f16c8d36638f-kube-api-access-7w7fd\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677327 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-config\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677349 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ffea777-f433-4e58-a670-8f39f3db893e-config\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677373 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-policies\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677397 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc 
kubenswrapper[4940]: I1126 06:56:54.677419 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677457 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677478 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-console-config\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677505 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.677690 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.679165 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-42nbc"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.679787 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.679963 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.680398 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.680472 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.680683 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.684730 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.685158 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.686024 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.686372 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.686481 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.686818 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.687305 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-bflcx"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.688146 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.689359 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.689692 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.690912 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.691249 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.695327 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.697380 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-5tcqw"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.715656 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.715941 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.717350 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.718225 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.718422 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.718234 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.719837 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.722742 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.723657 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.724933 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.727474 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-294qn"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.728774 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.730122 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.731652 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.732352 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.734089 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.734765 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.735464 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.742604 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9f8vx"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.743119 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.743963 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.744946 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sv7gt"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.745129 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.745424 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.746118 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.746891 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.747241 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ft8qp"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.756545 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t5zc5"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.757397 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.759315 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9x9dq"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.760390 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-q4hnr"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.760620 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.761528 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.764708 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.765772 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-gwrr8"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.766857 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778638 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5-metrics-tls\") pod \"dns-operator-744455d44c-9qfvj\" (UID: \"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5\") " pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778678 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-config\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778702 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-etcd-client\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778727 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778749 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/40c806a8-67f5-497d-91d8-6ce10f60e79a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778797 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2frlg\" (UniqueName: \"kubernetes.io/projected/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-kube-api-access-2frlg\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778817 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-serving-cert\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc 
kubenswrapper[4940]: I1126 06:56:54.778836 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5k4rs\" (UniqueName: \"kubernetes.io/projected/831a57b7-e553-4c53-a658-b10d4183d514-kube-api-access-5k4rs\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778858 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/01aa8f48-cf86-43a7-874c-cc3cfddbef46-signing-key\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778889 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t6vz\" (UniqueName: \"kubernetes.io/projected/01aa8f48-cf86-43a7-874c-cc3cfddbef46-kube-api-access-2t6vz\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778912 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e942613a-a952-4447-b337-ec6045783c9f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778934 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65kxl\" (UniqueName: \"kubernetes.io/projected/40c806a8-67f5-497d-91d8-6ce10f60e79a-kube-api-access-65kxl\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778955 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/60b8d39a-f9d8-423e-9a88-2842a04d96a4-audit-dir\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.778977 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnrnc\" (UniqueName: \"kubernetes.io/projected/0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5-kube-api-access-jnrnc\") pod \"dns-operator-744455d44c-9qfvj\" (UID: \"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5\") " pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779003 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02eb3fbf-9bcf-4097-80da-07430ae0cceb-serving-cert\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779026 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-etcd-client\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779064 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-trusted-ca-bundle\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779089 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qhrc\" (UniqueName: \"kubernetes.io/projected/2ac8adae-3926-4197-a3ff-2efa92ef82b5-kube-api-access-6qhrc\") pod \"multus-admission-controller-857f4d67dd-58znw\" (UID: \"2ac8adae-3926-4197-a3ff-2efa92ef82b5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779114 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkr82\" (UniqueName: \"kubernetes.io/projected/9579b457-bdfb-49b5-b12a-c14e87593922-kube-api-access-xkr82\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779159 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-service-ca-bundle\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779182 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-tmpfs\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779206 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7455a54f-ad8c-4bbb-8b56-4e68848ff102-serving-cert\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779227 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-oauth-config\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779281 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-auth-proxy-config\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779308 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-audit-dir\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779339 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppnnw\" (UniqueName: \"kubernetes.io/projected/67c31318-36b0-451f-b849-8859adaebd3f-kube-api-access-ppnnw\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779363 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-config\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9579b457-bdfb-49b5-b12a-c14e87593922-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779411 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-client\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779433 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-encryption-config\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779452 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-etcd-serving-ca\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779475 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-config\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779496 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-config\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779519 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779544 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7w7fd\" (UniqueName: \"kubernetes.io/projected/7bd56d38-cc74-420e-ab79-f16c8d36638f-kube-api-access-7w7fd\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779565 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-config\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779588 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ffea777-f433-4e58-a670-8f39f3db893e-config\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779611 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-policies\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779641 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779673 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779730 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779796 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-console-config\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779844 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779868 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779892 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/40c806a8-67f5-497d-91d8-6ce10f60e79a-images\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779952 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rblz4\" (UniqueName: \"kubernetes.io/projected/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-kube-api-access-rblz4\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.779978 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/60b8d39a-f9d8-423e-9a88-2842a04d96a4-node-pullsecrets\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780003 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67c31318-36b0-451f-b849-8859adaebd3f-serving-cert\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780024 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-ca\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780070 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780106 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6566c85-0449-4bdc-b9ce-3fb02da651dd-config\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780250 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-serving-cert\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780278 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2ac8adae-3926-4197-a3ff-2efa92ef82b5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-58znw\" (UID: \"2ac8adae-3926-4197-a3ff-2efa92ef82b5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780317 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-apiservice-cert\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780339 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-image-import-ca\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780368 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/870f8bfa-3583-43fd-b298-2f96c5a74dd4-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: \"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780392 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780415 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6566c85-0449-4bdc-b9ce-3fb02da651dd-trusted-ca\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780438 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96mrw\" (UniqueName: \"kubernetes.io/projected/c6566c85-0449-4bdc-b9ce-3fb02da651dd-kube-api-access-96mrw\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780495 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e93e70f9-30a0-4254-8cc9-2988b9028297-serving-cert\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780521 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-machine-approver-tls\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780546 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9135b1d0-1179-43b5-ae25-937d636e58dd-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780587 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9135b1d0-1179-43b5-ae25-937d636e58dd-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780611 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/01aa8f48-cf86-43a7-874c-cc3cfddbef46-signing-cabundle\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780633 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e942613a-a952-4447-b337-ec6045783c9f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780658 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780680 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780748 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-config\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780808 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-policies\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780947 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/67c31318-36b0-451f-b849-8859adaebd3f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.780978 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-webhook-cert\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781003 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kjlx\" (UniqueName: \"kubernetes.io/projected/988ea40c-2af0-4145-af3b-42d26d0e94a2-kube-api-access-4kjlx\") pod \"migrator-59844c95c7-6j7k9\" (UID: \"988ea40c-2af0-4145-af3b-42d26d0e94a2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781028 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92684e7e-cfae-421b-b356-5cb5d6b38879-serving-cert\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781069 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5d5ee928-c507-4716-9b87-3175c6a3ab44-profile-collector-cert\") pod 
\"catalog-operator-68c6474976-5dkdx\" (UID: \"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781090 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781112 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6cht\" (UniqueName: \"kubernetes.io/projected/e93e70f9-30a0-4254-8cc9-2988b9028297-kube-api-access-t6cht\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781132 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-encryption-config\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781155 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-config\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781153 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781177 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/9579b457-bdfb-49b5-b12a-c14e87593922-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781223 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9579b457-bdfb-49b5-b12a-c14e87593922-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781264 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsdmv\" (UniqueName: \"kubernetes.io/projected/5d5ee928-c507-4716-9b87-3175c6a3ab44-kube-api-access-nsdmv\") pod \"catalog-operator-68c6474976-5dkdx\" (UID: 
\"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781305 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781328 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8shw\" (UniqueName: \"kubernetes.io/projected/92684e7e-cfae-421b-b356-5cb5d6b38879-kube-api-access-p8shw\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781355 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40c806a8-67f5-497d-91d8-6ce10f60e79a-config\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781382 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781407 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-client-ca\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781432 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm6cj\" (UniqueName: \"kubernetes.io/projected/7455a54f-ad8c-4bbb-8b56-4e68848ff102-kube-api-access-dm6cj\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781456 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-audit\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781482 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mfc4\" (UniqueName: \"kubernetes.io/projected/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-kube-api-access-6mfc4\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 
06:56:54.781520 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdk26\" (UniqueName: \"kubernetes.io/projected/60b8d39a-f9d8-423e-9a88-2842a04d96a4-kube-api-access-gdk26\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781543 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjwpg\" (UniqueName: \"kubernetes.io/projected/870f8bfa-3583-43fd-b298-2f96c5a74dd4-kube-api-access-sjwpg\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: \"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781567 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slglm\" (UniqueName: \"kubernetes.io/projected/f6bef010-718c-468f-bc70-2424cd10e735-kube-api-access-slglm\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781590 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r54cz\" (UniqueName: \"kubernetes.io/projected/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-kube-api-access-r54cz\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781614 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781638 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781662 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c6566c85-0449-4bdc-b9ce-3fb02da651dd-serving-cert\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781700 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 
06:56:54.781723 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e942613a-a952-4447-b337-ec6045783c9f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781767 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ffea777-f433-4e58-a670-8f39f3db893e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781791 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69svt\" (UniqueName: \"kubernetes.io/projected/02eb3fbf-9bcf-4097-80da-07430ae0cceb-kube-api-access-69svt\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781816 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-dir\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781840 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-client-ca\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781862 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/870f8bfa-3583-43fd-b298-2f96c5a74dd4-config\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: \"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781886 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781919 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781945 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ffea777-f433-4e58-a670-8f39f3db893e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781967 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-audit-policies\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.781987 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-service-ca\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.782010 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d5ee928-c507-4716-9b87-3175c6a3ab44-srv-cert\") pod \"catalog-operator-68c6474976-5dkdx\" (UID: \"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.782033 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-oauth-serving-cert\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.782072 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9135b1d0-1179-43b5-ae25-937d636e58dd-config\") pod \"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.782100 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.782121 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-serving-cert\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.782141 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-service-ca\") pod \"console-f9d7485db-bflcx\" (UID: 
\"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.782835 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.782965 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-service-ca\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.784156 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/60b8d39a-f9d8-423e-9a88-2842a04d96a4-audit-dir\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.785477 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9579b457-bdfb-49b5-b12a-c14e87593922-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.786127 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.786966 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40c806a8-67f5-497d-91d8-6ce10f60e79a-config\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.787759 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.788528 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-client-ca\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.789646 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-audit\") pod 
\"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.789825 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-etcd-serving-ca\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.791011 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.793736 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-config\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.794227 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-console-config\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.798553 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-image-import-ca\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.800245 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9qfvj"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.800288 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.800702 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-config\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.800884 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.804495 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.804747 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/67c31318-36b0-451f-b849-8859adaebd3f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.806614 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60b8d39a-f9d8-423e-9a88-2842a04d96a4-config\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.807163 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-config\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.807748 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.808800 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/40c806a8-67f5-497d-91d8-6ce10f60e79a-images\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.809738 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6566c85-0449-4bdc-b9ce-3fb02da651dd-trusted-ca\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.809853 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.810528 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.811029 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-config\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.812915 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5-metrics-tls\") pod \"dns-operator-744455d44c-9qfvj\" (UID: \"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5\") " pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.813287 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-etcd-client\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.813498 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-encryption-config\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.813895 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/9579b457-bdfb-49b5-b12a-c14e87593922-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.814542 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.814781 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.815157 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/40c806a8-67f5-497d-91d8-6ce10f60e79a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.815207 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-trusted-ca-bundle\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.815453 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/870f8bfa-3583-43fd-b298-2f96c5a74dd4-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: 
\"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.815753 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.815762 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7455a54f-ad8c-4bbb-8b56-4e68848ff102-service-ca-bundle\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.816583 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-ca\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.817100 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6566c85-0449-4bdc-b9ce-3fb02da651dd-config\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.817365 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-dir\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.818100 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-client-ca\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.818534 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/870f8bfa-3583-43fd-b298-2f96c5a74dd4-config\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: \"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.818877 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.819146 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-etcd-client\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 
26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.819351 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-encryption-config\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.819398 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/60b8d39a-f9d8-423e-9a88-2842a04d96a4-node-pullsecrets\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.819785 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-audit-policies\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.820313 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-auth-proxy-config\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.823888 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-service-ca\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.824378 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-oauth-serving-cert\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.827646 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.827698 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-jpvjs"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.827711 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.828768 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.828785 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-audit-dir\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.829178 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.829295 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60b8d39a-f9d8-423e-9a88-2842a04d96a4-serving-cert\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.829315 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/92684e7e-cfae-421b-b356-5cb5d6b38879-etcd-client\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.829670 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.829733 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02eb3fbf-9bcf-4097-80da-07430ae0cceb-serving-cert\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.829778 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e93e70f9-30a0-4254-8cc9-2988b9028297-serving-cert\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.830005 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c6566c85-0449-4bdc-b9ce-3fb02da651dd-serving-cert\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.830094 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92684e7e-cfae-421b-b356-5cb5d6b38879-serving-cert\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.830231 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-serving-cert\") pod 
\"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.830316 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-machine-approver-tls\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.830369 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.830592 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67c31318-36b0-451f-b849-8859adaebd3f-serving-cert\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.830629 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2kv29"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.830685 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-oauth-config\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.831371 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.832406 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.832765 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7455a54f-ad8c-4bbb-8b56-4e68848ff102-serving-cert\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.834515 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9135b1d0-1179-43b5-ae25-937d636e58dd-serving-cert\") pod 
\"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.835176 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-z9v5r"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.837239 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-serving-cert\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.837411 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.837676 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2ac8adae-3926-4197-a3ff-2efa92ef82b5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-58znw\" (UID: \"2ac8adae-3926-4197-a3ff-2efa92ef82b5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.839163 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.840513 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.841456 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-24dgg"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.842300 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.843158 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-qbt7f"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.843945 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.844067 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-qbt7f" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.844659 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.847280 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.855701 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.857866 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.860539 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-58znw"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.862881 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.866673 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.868932 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.871540 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.873944 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sv7gt"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.877270 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-5tcqw"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.880121 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.881108 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.882503 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-sb94j"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883522 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slglm\" (UniqueName: \"kubernetes.io/projected/f6bef010-718c-468f-bc70-2424cd10e735-kube-api-access-slglm\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883555 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r54cz\" (UniqueName: \"kubernetes.io/projected/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-kube-api-access-r54cz\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: 
\"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883575 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mfc4\" (UniqueName: \"kubernetes.io/projected/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-kube-api-access-6mfc4\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883614 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e942613a-a952-4447-b337-ec6045783c9f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883635 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883678 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d5ee928-c507-4716-9b87-3175c6a3ab44-srv-cert\") pod \"catalog-operator-68c6474976-5dkdx\" (UID: \"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883730 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/01aa8f48-cf86-43a7-874c-cc3cfddbef46-signing-key\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883748 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t6vz\" (UniqueName: \"kubernetes.io/projected/01aa8f48-cf86-43a7-874c-cc3cfddbef46-kube-api-access-2t6vz\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883764 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e942613a-a952-4447-b337-ec6045783c9f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883813 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-tmpfs\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883851 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883913 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883930 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-apiservice-cert\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883963 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/01aa8f48-cf86-43a7-874c-cc3cfddbef46-signing-cabundle\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883977 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e942613a-a952-4447-b337-ec6045783c9f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.883995 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-webhook-cert\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.884012 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kjlx\" (UniqueName: \"kubernetes.io/projected/988ea40c-2af0-4145-af3b-42d26d0e94a2-kube-api-access-4kjlx\") pod \"migrator-59844c95c7-6j7k9\" (UID: \"988ea40c-2af0-4145-af3b-42d26d0e94a2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.884030 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5d5ee928-c507-4716-9b87-3175c6a3ab44-profile-collector-cert\") pod \"catalog-operator-68c6474976-5dkdx\" (UID: \"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:54 crc 
kubenswrapper[4940]: I1126 06:56:54.884069 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.884108 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsdmv\" (UniqueName: \"kubernetes.io/projected/5d5ee928-c507-4716-9b87-3175c6a3ab44-kube-api-access-nsdmv\") pod \"catalog-operator-68c6474976-5dkdx\" (UID: \"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.884405 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-tmpfs\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.884416 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.885328 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-294qn"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.886788 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.888288 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9x9dq"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.889951 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9135b1d0-1179-43b5-ae25-937d636e58dd-config\") pod \"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.890025 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-qbt7f"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.891310 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.892649 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-24dgg"] Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.903790 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.915371 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ffea777-f433-4e58-a670-8f39f3db893e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.923054 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.942253 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.962299 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.968865 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ffea777-f433-4e58-a670-8f39f3db893e-config\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:54 crc kubenswrapper[4940]: I1126 06:56:54.983186 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.002230 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.022871 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.042419 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.063281 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.082799 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.103242 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.122757 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.143152 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.164034 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.164452 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.164488 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.183618 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.210874 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.223398 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.243094 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.263167 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.283238 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.303487 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.323847 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.343301 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.363194 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.383474 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.402554 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.423142 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.429087 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d5ee928-c507-4716-9b87-3175c6a3ab44-srv-cert\") pod \"catalog-operator-68c6474976-5dkdx\" (UID: \"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.443946 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.463596 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.469763 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/5d5ee928-c507-4716-9b87-3175c6a3ab44-profile-collector-cert\") pod \"catalog-operator-68c6474976-5dkdx\" (UID: \"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.483779 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.503749 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.523934 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.542205 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.562381 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.583902 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.602855 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.609861 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/01aa8f48-cf86-43a7-874c-cc3cfddbef46-signing-key\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.624198 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.636552 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/01aa8f48-cf86-43a7-874c-cc3cfddbef46-signing-cabundle\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.643602 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.662684 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.683421 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.704818 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.721701 4940 request.go:700] Waited for 1.002755205s due to client-side throttling, not priority and fairness, request: 
GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/secrets?fieldSelector=metadata.name%3Dservice-ca-operator-dockercfg-rg9jl&limit=500&resourceVersion=0 Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.724031 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.744020 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.763249 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.768763 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-webhook-cert\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.769425 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-apiservice-cert\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.783294 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.804195 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.824619 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.843979 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.864374 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.883957 4940 configmap.go:193] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: failed to sync configmap cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.883980 4940 configmap.go:193] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.884110 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.884115 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-config podName:c43a8a22-cd03-4112-96ab-e8a7bd2819ef nodeName:}" failed. 
No retries permitted until 2025-11-26 06:56:56.3840288 +0000 UTC m=+117.904170449 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-config") pod "kube-storage-version-migrator-operator-b67b599dd-j7x6x" (UID: "c43a8a22-cd03-4112-96ab-e8a7bd2819ef") : failed to sync configmap cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.884200 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e942613a-a952-4447-b337-ec6045783c9f-config podName:e942613a-a952-4447-b337-ec6045783c9f nodeName:}" failed. No retries permitted until 2025-11-26 06:56:56.384176255 +0000 UTC m=+117.904317884 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e942613a-a952-4447-b337-ec6045783c9f-config") pod "openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" (UID: "e942613a-a952-4447-b337-ec6045783c9f") : failed to sync configmap cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.884228 4940 secret.go:188] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.884259 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e942613a-a952-4447-b337-ec6045783c9f-serving-cert podName:e942613a-a952-4447-b337-ec6045783c9f nodeName:}" failed. No retries permitted until 2025-11-26 06:56:56.384250918 +0000 UTC m=+117.904392547 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e942613a-a952-4447-b337-ec6045783c9f-serving-cert") pod "openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" (UID: "e942613a-a952-4447-b337-ec6045783c9f") : failed to sync secret cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.884275 4940 secret.go:188] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.884301 4940 configmap.go:193] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: failed to sync configmap cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.884328 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca podName:f6bef010-718c-468f-bc70-2424cd10e735 nodeName:}" failed. No retries permitted until 2025-11-26 06:56:56.38432055 +0000 UTC m=+117.904462179 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca") pod "marketplace-operator-79b997595-294qn" (UID: "f6bef010-718c-468f-bc70-2424cd10e735") : failed to sync configmap cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: E1126 06:56:55.884367 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-serving-cert podName:c43a8a22-cd03-4112-96ab-e8a7bd2819ef nodeName:}" failed. 
No retries permitted until 2025-11-26 06:56:56.38433988 +0000 UTC m=+117.904481669 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-serving-cert") pod "kube-storage-version-migrator-operator-b67b599dd-j7x6x" (UID: "c43a8a22-cd03-4112-96ab-e8a7bd2819ef") : failed to sync secret cache: timed out waiting for the condition Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.892865 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.910821 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.923341 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.943351 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.963283 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 06:56:55 crc kubenswrapper[4940]: I1126 06:56:55.984212 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.003479 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.023784 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.043403 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.063380 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.083361 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.104277 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.123201 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.143269 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.164150 4940 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.164509 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.164526 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.182868 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.203579 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.223723 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.243901 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.263468 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.283644 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.302926 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.323725 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.344092 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.384134 4940 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.404028 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.409784 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e942613a-a952-4447-b337-ec6045783c9f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.410023 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.410188 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e942613a-a952-4447-b337-ec6045783c9f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.410335 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.410442 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.411672 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.411831 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e942613a-a952-4447-b337-ec6045783c9f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.412741 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.414994 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e942613a-a952-4447-b337-ec6045783c9f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.415831 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: 
\"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.423904 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.443462 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.463232 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.483675 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.525703 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65kxl\" (UniqueName: \"kubernetes.io/projected/40c806a8-67f5-497d-91d8-6ce10f60e79a-kube-api-access-65kxl\") pod \"machine-api-operator-5694c8668f-gwrr8\" (UID: \"40c806a8-67f5-497d-91d8-6ce10f60e79a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.539951 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9579b457-bdfb-49b5-b12a-c14e87593922-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.547996 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.563311 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnrnc\" (UniqueName: \"kubernetes.io/projected/0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5-kube-api-access-jnrnc\") pod \"dns-operator-744455d44c-9qfvj\" (UID: \"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5\") " pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.582508 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8shw\" (UniqueName: \"kubernetes.io/projected/92684e7e-cfae-421b-b356-5cb5d6b38879-kube-api-access-p8shw\") pod \"etcd-operator-b45778765-jpvjs\" (UID: \"92684e7e-cfae-421b-b356-5cb5d6b38879\") " pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.620837 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm6cj\" (UniqueName: \"kubernetes.io/projected/7455a54f-ad8c-4bbb-8b56-4e68848ff102-kube-api-access-dm6cj\") pod \"authentication-operator-69f744f599-9f8vx\" (UID: \"7455a54f-ad8c-4bbb-8b56-4e68848ff102\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.657886 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9135b1d0-1179-43b5-ae25-937d636e58dd-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-s22s9\" (UID: \"9135b1d0-1179-43b5-ae25-937d636e58dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.670881 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdk26\" (UniqueName: \"kubernetes.io/projected/60b8d39a-f9d8-423e-9a88-2842a04d96a4-kube-api-access-gdk26\") pod \"apiserver-76f77b778f-z9v5r\" (UID: \"60b8d39a-f9d8-423e-9a88-2842a04d96a4\") " pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.674494 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.697421 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.698151 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjwpg\" (UniqueName: \"kubernetes.io/projected/870f8bfa-3583-43fd-b298-2f96c5a74dd4-kube-api-access-sjwpg\") pod \"openshift-apiserver-operator-796bbdcf4f-sn6sg\" (UID: \"870f8bfa-3583-43fd-b298-2f96c5a74dd4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.706253 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2frlg\" (UniqueName: \"kubernetes.io/projected/2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0-kube-api-access-2frlg\") pod \"apiserver-7bbb656c7d-55f84\" (UID: \"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.713300 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.724969 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96mrw\" (UniqueName: \"kubernetes.io/projected/c6566c85-0449-4bdc-b9ce-3fb02da651dd-kube-api-access-96mrw\") pod \"console-operator-58897d9998-t5zc5\" (UID: \"c6566c85-0449-4bdc-b9ce-3fb02da651dd\") " pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.741209 4940 request.go:700] Waited for 1.933077791s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/console/token Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.742019 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w7fd\" (UniqueName: \"kubernetes.io/projected/7bd56d38-cc74-420e-ab79-f16c8d36638f-kube-api-access-7w7fd\") pod \"oauth-openshift-558db77b4-2kv29\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.761569 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-gwrr8"] Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.763236 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k4rs\" (UniqueName: \"kubernetes.io/projected/831a57b7-e553-4c53-a658-b10d4183d514-kube-api-access-5k4rs\") pod \"console-f9d7485db-bflcx\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.766463 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.782128 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6cht\" (UniqueName: \"kubernetes.io/projected/e93e70f9-30a0-4254-8cc9-2988b9028297-kube-api-access-t6cht\") pod \"route-controller-manager-6576b87f9c-8pf88\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.791953 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.801477 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rblz4\" (UniqueName: \"kubernetes.io/projected/7e0fb7a9-3242-44dd-956d-f85a3f5f1cde-kube-api-access-rblz4\") pod \"machine-approver-56656f9798-4tvq7\" (UID: \"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.819652 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qhrc\" (UniqueName: \"kubernetes.io/projected/2ac8adae-3926-4197-a3ff-2efa92ef82b5-kube-api-access-6qhrc\") pod \"multus-admission-controller-857f4d67dd-58znw\" (UID: \"2ac8adae-3926-4197-a3ff-2efa92ef82b5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.820171 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.838193 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkr82\" (UniqueName: \"kubernetes.io/projected/9579b457-bdfb-49b5-b12a-c14e87593922-kube-api-access-xkr82\") pod \"cluster-image-registry-operator-dc59b4c8b-s55tc\" (UID: \"9579b457-bdfb-49b5-b12a-c14e87593922\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.842256 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.853954 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.861079 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.869084 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" event={"ID":"40c806a8-67f5-497d-91d8-6ce10f60e79a","Type":"ContainerStarted","Data":"8f53e1308494ba8f14aea256aeeb8cdee352eb2661c048b2c10def10f47c551a"} Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.874954 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69svt\" (UniqueName: \"kubernetes.io/projected/02eb3fbf-9bcf-4097-80da-07430ae0cceb-kube-api-access-69svt\") pod \"controller-manager-879f6c89f-ft8qp\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.880234 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ffea777-f433-4e58-a670-8f39f3db893e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jk4hv\" (UID: \"2ffea777-f433-4e58-a670-8f39f3db893e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.897211 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppnnw\" (UniqueName: \"kubernetes.io/projected/67c31318-36b0-451f-b849-8859adaebd3f-kube-api-access-ppnnw\") pod \"openshift-config-operator-7777fb866f-4bs4k\" (UID: \"67c31318-36b0-451f-b849-8859adaebd3f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.903611 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.923744 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.942775 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.964817 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9qfvj"] Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.965320 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.983433 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9"] Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.985103 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" Nov 26 06:56:56 crc kubenswrapper[4940]: I1126 06:56:56.985294 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.004862 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.007914 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.014993 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.024090 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 06:56:57 crc kubenswrapper[4940]: W1126 06:56:57.024735 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b6a5b0d_3f28_4bc2_95a9_8bdc82ff2eb5.slice/crio-9da0fadafb4fbf7432a491b13f9e2c7721a7bccd4b90935896e9892e52d56966 WatchSource:0}: Error finding container 9da0fadafb4fbf7432a491b13f9e2c7721a7bccd4b90935896e9892e52d56966: Status 404 returned error can't find the container with id 9da0fadafb4fbf7432a491b13f9e2c7721a7bccd4b90935896e9892e52d56966 Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.029152 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" Nov 26 06:56:57 crc kubenswrapper[4940]: W1126 06:56:57.036853 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9135b1d0_1179_43b5_ae25_937d636e58dd.slice/crio-99ba2469941091885d5432cfe5e12f5bdf6f6d78a010044827cbf93816375761 WatchSource:0}: Error finding container 99ba2469941091885d5432cfe5e12f5bdf6f6d78a010044827cbf93816375761: Status 404 returned error can't find the container with id 99ba2469941091885d5432cfe5e12f5bdf6f6d78a010044827cbf93816375761 Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.051065 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.063984 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r54cz\" (UniqueName: \"kubernetes.io/projected/bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0-kube-api-access-r54cz\") pod \"packageserver-d55dfcdfc-wpzwc\" (UID: \"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.075454 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.080387 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.087588 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.091943 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slglm\" (UniqueName: \"kubernetes.io/projected/f6bef010-718c-468f-bc70-2424cd10e735-kube-api-access-slglm\") pod \"marketplace-operator-79b997595-294qn\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.098761 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t6vz\" (UniqueName: \"kubernetes.io/projected/01aa8f48-cf86-43a7-874c-cc3cfddbef46-kube-api-access-2t6vz\") pod \"service-ca-9c57cc56f-5tcqw\" (UID: \"01aa8f48-cf86-43a7-874c-cc3cfddbef46\") " pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.100685 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:56:57 crc kubenswrapper[4940]: W1126 06:56:57.106262 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod870f8bfa_3583_43fd_b298_2f96c5a74dd4.slice/crio-3e0ef05aaa17ebb0d0c28553a00f47683679d126162c91905327c84eca12db47 WatchSource:0}: Error finding container 3e0ef05aaa17ebb0d0c28553a00f47683679d126162c91905327c84eca12db47: Status 404 returned error can't find the container with id 3e0ef05aaa17ebb0d0c28553a00f47683679d126162c91905327c84eca12db47 Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.116099 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-jpvjs"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.116850 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e942613a-a952-4447-b337-ec6045783c9f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-fw4tw\" (UID: \"e942613a-a952-4447-b337-ec6045783c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.134696 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.184673 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.188891 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.195833 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsdmv\" (UniqueName: \"kubernetes.io/projected/5d5ee928-c507-4716-9b87-3175c6a3ab44-kube-api-access-nsdmv\") pod \"catalog-operator-68c6474976-5dkdx\" (UID: \"5d5ee928-c507-4716-9b87-3175c6a3ab44\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:57 crc kubenswrapper[4940]: W1126 06:56:57.198180 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e0fb7a9_3242_44dd_956d_f85a3f5f1cde.slice/crio-40794d35237867b3471448cc45ded4c18a1e8f7de171931885811e9ab6bf3bf8 WatchSource:0}: Error finding container 40794d35237867b3471448cc45ded4c18a1e8f7de171931885811e9ab6bf3bf8: Status 404 returned error can't find the container with id 40794d35237867b3471448cc45ded4c18a1e8f7de171931885811e9ab6bf3bf8 Nov 26 06:56:57 crc kubenswrapper[4940]: W1126 06:56:57.199026 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92684e7e_cfae_421b_b356_5cb5d6b38879.slice/crio-17955a747c0233252de328c8688c3561ff15330cfdc31b9f5f86d70dd07fcdfd WatchSource:0}: Error finding container 17955a747c0233252de328c8688c3561ff15330cfdc31b9f5f86d70dd07fcdfd: Status 404 returned error can't find the container with id 17955a747c0233252de328c8688c3561ff15330cfdc31b9f5f86d70dd07fcdfd Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.203068 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.203251 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mfc4\" (UniqueName: \"kubernetes.io/projected/c43a8a22-cd03-4112-96ab-e8a7bd2819ef-kube-api-access-6mfc4\") pod \"kube-storage-version-migrator-operator-b67b599dd-j7x6x\" (UID: \"c43a8a22-cd03-4112-96ab-e8a7bd2819ef\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.224129 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.233809 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kjlx\" (UniqueName: \"kubernetes.io/projected/988ea40c-2af0-4145-af3b-42d26d0e94a2-kube-api-access-4kjlx\") pod \"migrator-59844c95c7-6j7k9\" (UID: \"988ea40c-2af0-4145-af3b-42d26d0e94a2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.242898 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.255523 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9f8vx"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.261308 4940 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-z9v5r"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.262831 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.284886 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 06:56:57 crc kubenswrapper[4940]: W1126 06:56:57.313603 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod60b8d39a_f9d8_423e_9a88_2842a04d96a4.slice/crio-eef8fb6399f6e71ed2729313fd709dbe0a15e794704e26c7fc6a73d7050c2948 WatchSource:0}: Error finding container eef8fb6399f6e71ed2729313fd709dbe0a15e794704e26c7fc6a73d7050c2948: Status 404 returned error can't find the container with id eef8fb6399f6e71ed2729313fd709dbe0a15e794704e26c7fc6a73d7050c2948 Nov 26 06:56:57 crc kubenswrapper[4940]: W1126 06:56:57.316515 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7455a54f_ad8c_4bbb_8b56_4e68848ff102.slice/crio-e42448e19bc3769557bf05a1f5b7e8b155e067af1a2e60f6b32b37ba7ecb7c83 WatchSource:0}: Error finding container e42448e19bc3769557bf05a1f5b7e8b155e067af1a2e60f6b32b37ba7ecb7c83: Status 404 returned error can't find the container with id e42448e19bc3769557bf05a1f5b7e8b155e067af1a2e60f6b32b37ba7ecb7c83 Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.317956 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-58znw"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330201 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-stats-auth\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330265 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/434b74d5-9895-4254-8d8a-17fec36577ab-secret-volume\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330297 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22919f3b-67d1-4b9a-a0fd-476ba56e9918-service-ca-bundle\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330319 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01c54419-24df-4a02-9671-7457e0540ca7-serving-cert\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330340 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvplc\" (UniqueName: \"kubernetes.io/projected/37976138-9ec7-4db0-943e-aaa3e84c96d6-kube-api-access-hvplc\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330361 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/2d74f8fa-d746-4920-ac46-e6a4a7a501f6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gt48v\" (UID: \"2d74f8fa-d746-4920-ac46-e6a4a7a501f6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330381 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnmdp\" (UniqueName: \"kubernetes.io/projected/01c54419-24df-4a02-9671-7457e0540ca7-kube-api-access-cnmdp\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330398 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/efe355d7-3ba1-451e-aebd-271c367e186c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330414 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwr4z\" (UniqueName: \"kubernetes.io/projected/2d74f8fa-d746-4920-ac46-e6a4a7a501f6-kube-api-access-rwr4z\") pod \"control-plane-machine-set-operator-78cbb6b69f-gt48v\" (UID: \"2d74f8fa-d746-4920-ac46-e6a4a7a501f6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330429 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3fc66d85-3d35-4198-9854-c26921a576a6-srv-cert\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330443 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330459 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3fc66d85-3d35-4198-9854-c26921a576a6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330476 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bkz7\" (UniqueName: \"kubernetes.io/projected/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-kube-api-access-2bkz7\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330526 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f7mz\" (UniqueName: \"kubernetes.io/projected/22919f3b-67d1-4b9a-a0fd-476ba56e9918-kube-api-access-4f7mz\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330560 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-metrics-certs\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330595 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37976138-9ec7-4db0-943e-aaa3e84c96d6-metrics-tls\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330633 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4bms\" (UniqueName: \"kubernetes.io/projected/3fc66d85-3d35-4198-9854-c26921a576a6-kube-api-access-k4bms\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330670 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-registry-certificates\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330686 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330732 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37976138-9ec7-4db0-943e-aaa3e84c96d6-trusted-ca\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: 
\"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330748 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330766 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pq29\" (UniqueName: \"kubernetes.io/projected/434b74d5-9895-4254-8d8a-17fec36577ab-kube-api-access-2pq29\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330814 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btb26\" (UniqueName: \"kubernetes.io/projected/036446d1-5a1d-40f2-be92-7a8950c3efff-kube-api-access-btb26\") pod \"cluster-samples-operator-665b6dd947-vf4n5\" (UID: \"036446d1-5a1d-40f2-be92-7a8950c3efff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330863 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zmzz\" (UniqueName: \"kubernetes.io/projected/647d3086-00d3-476f-879f-dd922b1c1313-kube-api-access-5zmzz\") pod \"downloads-7954f5f757-sb94j\" (UID: \"647d3086-00d3-476f-879f-dd922b1c1313\") " pod="openshift-console/downloads-7954f5f757-sb94j" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330889 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-default-certificate\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330904 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/efe355d7-3ba1-451e-aebd-271c367e186c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330934 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-proxy-tls\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330948 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msfhs\" (UniqueName: \"kubernetes.io/projected/413a35f9-b30d-4825-92ea-ff4164212404-kube-api-access-msfhs\") 
pod \"package-server-manager-789f6589d5-dshqv\" (UID: \"413a35f9-b30d-4825-92ea-ff4164212404\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330965 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.330986 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331003 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz9m8\" (UniqueName: \"kubernetes.io/projected/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-kube-api-access-qz9m8\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331029 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-bound-sa-token\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331077 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwhws\" (UniqueName: \"kubernetes.io/projected/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-kube-api-access-vwhws\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331106 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-trusted-ca\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331123 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/036446d1-5a1d-40f2-be92-7a8950c3efff-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vf4n5\" (UID: \"036446d1-5a1d-40f2-be92-7a8950c3efff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331173 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/01c54419-24df-4a02-9671-7457e0540ca7-config\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331188 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/413a35f9-b30d-4825-92ea-ff4164212404-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-dshqv\" (UID: \"413a35f9-b30d-4825-92ea-ff4164212404\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331203 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-images\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331235 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/434b74d5-9895-4254-8d8a-17fec36577ab-config-volume\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331253 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-registry-tls\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331271 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/37976138-9ec7-4db0-943e-aaa3e84c96d6-bound-sa-token\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331312 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-proxy-tls\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.331360 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwhkt\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-kube-api-access-pwhkt\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: E1126 06:56:57.335608 4940 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:57.835592052 +0000 UTC m=+119.355733671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.348497 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.348661 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.355007 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.361973 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2kv29"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.374097 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.374726 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t5zc5"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.377012 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-bflcx"] Nov 26 06:56:57 crc kubenswrapper[4940]: W1126 06:56:57.390623 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7bd56d38_cc74_420e_ab79_f16c8d36638f.slice/crio-0e1a88906edb02d7681ffae619b3c58c5b33cee30a1486271d784132ff2ac4ea WatchSource:0}: Error finding container 0e1a88906edb02d7681ffae619b3c58c5b33cee30a1486271d784132ff2ac4ea: Status 404 returned error can't find the container with id 0e1a88906edb02d7681ffae619b3c58c5b33cee30a1486271d784132ff2ac4ea Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.413614 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.428426 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.433123 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:57 crc kubenswrapper[4940]: E1126 06:56:57.433355 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:57.933324507 +0000 UTC m=+119.453466166 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.433405 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-registry-certificates\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.433477 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.433537 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9jgs\" (UniqueName: \"kubernetes.io/projected/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-kube-api-access-r9jgs\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.433564 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37976138-9ec7-4db0-943e-aaa3e84c96d6-trusted-ca\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.433581 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.433617 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pq29\" (UniqueName: \"kubernetes.io/projected/434b74d5-9895-4254-8d8a-17fec36577ab-kube-api-access-2pq29\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.434671 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh4hr\" (UniqueName: \"kubernetes.io/projected/4befe9c4-9c54-4bea-9798-6b908273b93c-kube-api-access-sh4hr\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.434710 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btb26\" (UniqueName: \"kubernetes.io/projected/036446d1-5a1d-40f2-be92-7a8950c3efff-kube-api-access-btb26\") pod \"cluster-samples-operator-665b6dd947-vf4n5\" (UID: \"036446d1-5a1d-40f2-be92-7a8950c3efff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.435505 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e6611854-e2f1-449b-bcdd-fde4684968d5-metrics-tls\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436129 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-default-certificate\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436172 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zmzz\" (UniqueName: \"kubernetes.io/projected/647d3086-00d3-476f-879f-dd922b1c1313-kube-api-access-5zmzz\") pod \"downloads-7954f5f757-sb94j\" (UID: \"647d3086-00d3-476f-879f-dd922b1c1313\") " pod="openshift-console/downloads-7954f5f757-sb94j" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436207 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/efe355d7-3ba1-451e-aebd-271c367e186c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436257 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-proxy-tls\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436294 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-msfhs\" (UniqueName: \"kubernetes.io/projected/413a35f9-b30d-4825-92ea-ff4164212404-kube-api-access-msfhs\") pod \"package-server-manager-789f6589d5-dshqv\" (UID: \"413a35f9-b30d-4825-92ea-ff4164212404\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436326 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436368 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436404 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz9m8\" (UniqueName: \"kubernetes.io/projected/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-kube-api-access-qz9m8\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436435 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-bound-sa-token\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436507 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwhws\" (UniqueName: \"kubernetes.io/projected/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-kube-api-access-vwhws\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436533 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-trusted-ca\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436562 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/036446d1-5a1d-40f2-be92-7a8950c3efff-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vf4n5\" (UID: \"036446d1-5a1d-40f2-be92-7a8950c3efff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436634 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01c54419-24df-4a02-9671-7457e0540ca7-config\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436665 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/413a35f9-b30d-4825-92ea-ff4164212404-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-dshqv\" (UID: \"413a35f9-b30d-4825-92ea-ff4164212404\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436695 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-images\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436734 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/434b74d5-9895-4254-8d8a-17fec36577ab-config-volume\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436788 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-registry-tls\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436818 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/37976138-9ec7-4db0-943e-aaa3e84c96d6-bound-sa-token\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436870 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-node-bootstrap-token\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436899 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-socket-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.436925 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: 
\"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-plugins-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: E1126 06:56:57.437019 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:57.937004315 +0000 UTC m=+119.457145924 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437079 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437033 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37976138-9ec7-4db0-943e-aaa3e84c96d6-trusted-ca\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437127 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-proxy-tls\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437314 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-registration-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437367 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-mountpoint-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437456 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwhkt\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-kube-api-access-pwhkt\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 
26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437658 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-stats-auth\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437717 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e6611854-e2f1-449b-bcdd-fde4684968d5-config-volume\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437959 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-registry-certificates\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.437980 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/434b74d5-9895-4254-8d8a-17fec36577ab-secret-volume\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.438154 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22919f3b-67d1-4b9a-a0fd-476ba56e9918-service-ca-bundle\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.438182 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-csi-data-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.438234 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01c54419-24df-4a02-9671-7457e0540ca7-serving-cert\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.438264 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvplc\" (UniqueName: \"kubernetes.io/projected/37976138-9ec7-4db0-943e-aaa3e84c96d6-kube-api-access-hvplc\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.438291 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/2d74f8fa-d746-4920-ac46-e6a4a7a501f6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gt48v\" (UID: \"2d74f8fa-d746-4920-ac46-e6a4a7a501f6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.438923 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.438317 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-certs\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.439572 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnmdp\" (UniqueName: \"kubernetes.io/projected/01c54419-24df-4a02-9671-7457e0540ca7-kube-api-access-cnmdp\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.439616 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/efe355d7-3ba1-451e-aebd-271c367e186c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.439686 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwr4z\" (UniqueName: \"kubernetes.io/projected/2d74f8fa-d746-4920-ac46-e6a4a7a501f6-kube-api-access-rwr4z\") pod \"control-plane-machine-set-operator-78cbb6b69f-gt48v\" (UID: \"2d74f8fa-d746-4920-ac46-e6a4a7a501f6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.439745 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3fc66d85-3d35-4198-9854-c26921a576a6-srv-cert\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.439787 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.439815 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/3fc66d85-3d35-4198-9854-c26921a576a6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.439858 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bkz7\" (UniqueName: \"kubernetes.io/projected/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-kube-api-access-2bkz7\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.441338 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/efe355d7-3ba1-451e-aebd-271c367e186c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.441852 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvlds\" (UniqueName: \"kubernetes.io/projected/165dbb27-d422-4fe5-9ee0-a2f6b2cff11e-kube-api-access-nvlds\") pod \"ingress-canary-qbt7f\" (UID: \"165dbb27-d422-4fe5-9ee0-a2f6b2cff11e\") " pod="openshift-ingress-canary/ingress-canary-qbt7f" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.442150 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f7mz\" (UniqueName: \"kubernetes.io/projected/22919f3b-67d1-4b9a-a0fd-476ba56e9918-kube-api-access-4f7mz\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.442216 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-metrics-certs\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.442255 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37976138-9ec7-4db0-943e-aaa3e84c96d6-metrics-tls\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.442525 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4bms\" (UniqueName: \"kubernetes.io/projected/3fc66d85-3d35-4198-9854-c26921a576a6-kube-api-access-k4bms\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.442563 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/165dbb27-d422-4fe5-9ee0-a2f6b2cff11e-cert\") pod \"ingress-canary-qbt7f\" (UID: \"165dbb27-d422-4fe5-9ee0-a2f6b2cff11e\") " 
pod="openshift-ingress-canary/ingress-canary-qbt7f" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.442613 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5lz6\" (UniqueName: \"kubernetes.io/projected/e6611854-e2f1-449b-bcdd-fde4684968d5-kube-api-access-k5lz6\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.446799 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01c54419-24df-4a02-9671-7457e0540ca7-serving-cert\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.448249 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-proxy-tls\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.451949 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-proxy-tls\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.454327 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-trusted-ca\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.454695 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/434b74d5-9895-4254-8d8a-17fec36577ab-config-volume\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.454941 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/434b74d5-9895-4254-8d8a-17fec36577ab-secret-volume\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.455287 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22919f3b-67d1-4b9a-a0fd-476ba56e9918-service-ca-bundle\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.456414 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.456458 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-images\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.456511 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/efe355d7-3ba1-451e-aebd-271c367e186c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.457181 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01c54419-24df-4a02-9671-7457e0540ca7-config\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.457195 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-default-certificate\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.459235 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/036446d1-5a1d-40f2-be92-7a8950c3efff-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vf4n5\" (UID: \"036446d1-5a1d-40f2-be92-7a8950c3efff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.460993 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-registry-tls\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.462719 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.462915 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3fc66d85-3d35-4198-9854-c26921a576a6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.463294 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/2d74f8fa-d746-4920-ac46-e6a4a7a501f6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gt48v\" (UID: \"2d74f8fa-d746-4920-ac46-e6a4a7a501f6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.465118 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-stats-auth\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.472714 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37976138-9ec7-4db0-943e-aaa3e84c96d6-metrics-tls\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.474767 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/413a35f9-b30d-4825-92ea-ff4164212404-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-dshqv\" (UID: \"413a35f9-b30d-4825-92ea-ff4164212404\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.481180 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3fc66d85-3d35-4198-9854-c26921a576a6-srv-cert\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.483725 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/22919f3b-67d1-4b9a-a0fd-476ba56e9918-metrics-certs\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.493807 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pq29\" (UniqueName: \"kubernetes.io/projected/434b74d5-9895-4254-8d8a-17fec36577ab-kube-api-access-2pq29\") pod \"collect-profiles-29402325-xn5f8\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.511479 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btb26\" (UniqueName: \"kubernetes.io/projected/036446d1-5a1d-40f2-be92-7a8950c3efff-kube-api-access-btb26\") pod \"cluster-samples-operator-665b6dd947-vf4n5\" (UID: \"036446d1-5a1d-40f2-be92-7a8950c3efff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.517705 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msfhs\" (UniqueName: \"kubernetes.io/projected/413a35f9-b30d-4825-92ea-ff4164212404-kube-api-access-msfhs\") pod \"package-server-manager-789f6589d5-dshqv\" (UID: \"413a35f9-b30d-4825-92ea-ff4164212404\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544259 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544507 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-node-bootstrap-token\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544529 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-socket-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544548 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-plugins-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544578 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-registration-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544596 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-mountpoint-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544624 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e6611854-e2f1-449b-bcdd-fde4684968d5-config-volume\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544638 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-csi-data-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 
06:56:57.544661 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-certs\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544702 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvlds\" (UniqueName: \"kubernetes.io/projected/165dbb27-d422-4fe5-9ee0-a2f6b2cff11e-kube-api-access-nvlds\") pod \"ingress-canary-qbt7f\" (UID: \"165dbb27-d422-4fe5-9ee0-a2f6b2cff11e\") " pod="openshift-ingress-canary/ingress-canary-qbt7f" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544738 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/165dbb27-d422-4fe5-9ee0-a2f6b2cff11e-cert\") pod \"ingress-canary-qbt7f\" (UID: \"165dbb27-d422-4fe5-9ee0-a2f6b2cff11e\") " pod="openshift-ingress-canary/ingress-canary-qbt7f" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544754 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5lz6\" (UniqueName: \"kubernetes.io/projected/e6611854-e2f1-449b-bcdd-fde4684968d5-kube-api-access-k5lz6\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544772 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9jgs\" (UniqueName: \"kubernetes.io/projected/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-kube-api-access-r9jgs\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544804 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh4hr\" (UniqueName: \"kubernetes.io/projected/4befe9c4-9c54-4bea-9798-6b908273b93c-kube-api-access-sh4hr\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.544823 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e6611854-e2f1-449b-bcdd-fde4684968d5-metrics-tls\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.545840 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e6611854-e2f1-449b-bcdd-fde4684968d5-config-volume\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: E1126 06:56:57.545926 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.045908786 +0000 UTC m=+119.566050405 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.546456 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-plugins-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.546517 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-socket-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.546562 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-registration-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.547096 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-mountpoint-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.547719 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/4befe9c4-9c54-4bea-9798-6b908273b93c-csi-data-dir\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.548876 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f7mz\" (UniqueName: \"kubernetes.io/projected/22919f3b-67d1-4b9a-a0fd-476ba56e9918-kube-api-access-4f7mz\") pod \"router-default-5444994796-42nbc\" (UID: \"22919f3b-67d1-4b9a-a0fd-476ba56e9918\") " pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.549229 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/165dbb27-d422-4fe5-9ee0-a2f6b2cff11e-cert\") pod \"ingress-canary-qbt7f\" (UID: \"165dbb27-d422-4fe5-9ee0-a2f6b2cff11e\") " pod="openshift-ingress-canary/ingress-canary-qbt7f" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.549943 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-node-bootstrap-token\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc 
kubenswrapper[4940]: I1126 06:56:57.549986 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-certs\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.550287 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e6611854-e2f1-449b-bcdd-fde4684968d5-metrics-tls\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.565568 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.567813 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz9m8\" (UniqueName: \"kubernetes.io/projected/a3dddcb1-02be-4ac7-8da5-b83e8552a74f-kube-api-access-qz9m8\") pod \"machine-config-operator-74547568cd-h62ll\" (UID: \"a3dddcb1-02be-4ac7-8da5-b83e8552a74f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.573553 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.580620 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-bound-sa-token\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.607154 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvplc\" (UniqueName: \"kubernetes.io/projected/37976138-9ec7-4db0-943e-aaa3e84c96d6-kube-api-access-hvplc\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.627250 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwhws\" (UniqueName: \"kubernetes.io/projected/5e8df8d5-008b-4af5-9b01-13bf857e1ac8-kube-api-access-vwhws\") pod \"openshift-controller-manager-operator-756b6f6bc6-92kwm\" (UID: \"5e8df8d5-008b-4af5-9b01-13bf857e1ac8\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.642424 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.644050 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.645577 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.645709 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/37976138-9ec7-4db0-943e-aaa3e84c96d6-bound-sa-token\") pod \"ingress-operator-5b745b69d9-z2dfd\" (UID: \"37976138-9ec7-4db0-943e-aaa3e84c96d6\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: E1126 06:56:57.645997 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.145980656 +0000 UTC m=+119.666122285 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.667642 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.667781 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnmdp\" (UniqueName: \"kubernetes.io/projected/01c54419-24df-4a02-9671-7457e0540ca7-kube-api-access-cnmdp\") pod \"service-ca-operator-777779d784-8dv5b\" (UID: \"01c54419-24df-4a02-9671-7457e0540ca7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.680429 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.681410 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.694888 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.721639 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.743276 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.747570 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:57 crc kubenswrapper[4940]: E1126 06:56:57.747929 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.247912996 +0000 UTC m=+119.768054615 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.781488 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwhkt\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-kube-api-access-pwhkt\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.782744 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4bms\" (UniqueName: \"kubernetes.io/projected/3fc66d85-3d35-4198-9854-c26921a576a6-kube-api-access-k4bms\") pod \"olm-operator-6b444d44fb-mn29q\" (UID: \"3fc66d85-3d35-4198-9854-c26921a576a6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.786247 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bkz7\" (UniqueName: \"kubernetes.io/projected/bf6ea3c8-972d-4d8b-b0aa-058ceea6c821-kube-api-access-2bkz7\") pod \"machine-config-controller-84d6567774-cttzg\" (UID: \"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.787022 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwr4z\" (UniqueName: \"kubernetes.io/projected/2d74f8fa-d746-4920-ac46-e6a4a7a501f6-kube-api-access-rwr4z\") pod \"control-plane-machine-set-operator-78cbb6b69f-gt48v\" (UID: \"2d74f8fa-d746-4920-ac46-e6a4a7a501f6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.787788 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zmzz\" (UniqueName: \"kubernetes.io/projected/647d3086-00d3-476f-879f-dd922b1c1313-kube-api-access-5zmzz\") pod \"downloads-7954f5f757-sb94j\" (UID: \"647d3086-00d3-476f-879f-dd922b1c1313\") " pod="openshift-console/downloads-7954f5f757-sb94j" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 
06:56:57.799632 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5lz6\" (UniqueName: \"kubernetes.io/projected/e6611854-e2f1-449b-bcdd-fde4684968d5-kube-api-access-k5lz6\") pod \"dns-default-24dgg\" (UID: \"e6611854-e2f1-449b-bcdd-fde4684968d5\") " pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.821339 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9jgs\" (UniqueName: \"kubernetes.io/projected/0bd15561-d728-45b6-a33a-e9b3d0e8e5db-kube-api-access-r9jgs\") pod \"machine-config-server-q4hnr\" (UID: \"0bd15561-d728-45b6-a33a-e9b3d0e8e5db\") " pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.847398 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-294qn"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.852293 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh4hr\" (UniqueName: \"kubernetes.io/projected/4befe9c4-9c54-4bea-9798-6b908273b93c-kube-api-access-sh4hr\") pod \"csi-hostpathplugin-9x9dq\" (UID: \"4befe9c4-9c54-4bea-9798-6b908273b93c\") " pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.867015 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvlds\" (UniqueName: \"kubernetes.io/projected/165dbb27-d422-4fe5-9ee0-a2f6b2cff11e-kube-api-access-nvlds\") pod \"ingress-canary-qbt7f\" (UID: \"165dbb27-d422-4fe5-9ee0-a2f6b2cff11e\") " pod="openshift-ingress-canary/ingress-canary-qbt7f" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.869985 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:57 crc kubenswrapper[4940]: E1126 06:56:57.870581 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.370565115 +0000 UTC m=+119.890706734 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.893420 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" event={"ID":"9135b1d0-1179-43b5-ae25-937d636e58dd","Type":"ContainerStarted","Data":"707454c838e4ac1d6a5e3094b959946ad1167d55f60eb85dcf85a32989ddfede"} Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.893489 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" event={"ID":"9135b1d0-1179-43b5-ae25-937d636e58dd","Type":"ContainerStarted","Data":"99ba2469941091885d5432cfe5e12f5bdf6f6d78a010044827cbf93816375761"} Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.898710 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" event={"ID":"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0","Type":"ContainerStarted","Data":"a1c6fccb40c7434a11cb4ae00509f2c86d6ef91d3601720a2016a950bd975b5c"} Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.928274 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.928314 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" event={"ID":"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde","Type":"ContainerStarted","Data":"40794d35237867b3471448cc45ded4c18a1e8f7de171931885811e9ab6bf3bf8"} Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.928555 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-sb94j" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.935427 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.940161 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ft8qp"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.959238 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k"] Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.962304 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.929933 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.979301 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:57 crc kubenswrapper[4940]: E1126 06:56:57.980521 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.48050266 +0000 UTC m=+120.000644279 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:57 crc kubenswrapper[4940]: I1126 06:56:57.999353 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" event={"ID":"60b8d39a-f9d8-423e-9a88-2842a04d96a4","Type":"ContainerStarted","Data":"eef8fb6399f6e71ed2729313fd709dbe0a15e794704e26c7fc6a73d7050c2948"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.003175 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" event={"ID":"40c806a8-67f5-497d-91d8-6ce10f60e79a","Type":"ContainerStarted","Data":"76716bcffc53f72b387edc11f75a293346af7c215da123c2870581c124f37f50"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.003226 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" event={"ID":"40c806a8-67f5-497d-91d8-6ce10f60e79a","Type":"ContainerStarted","Data":"e992d3ddec3f1b7770e488c9054a24fd6b030068c8227fab9f50e6b2ebdba951"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.005161 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" event={"ID":"870f8bfa-3583-43fd-b298-2f96c5a74dd4","Type":"ContainerStarted","Data":"490a5345e5609333b041022c1af1b81eaef3ff425c74e4b4490caec18ccca0c3"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.005188 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" event={"ID":"870f8bfa-3583-43fd-b298-2f96c5a74dd4","Type":"ContainerStarted","Data":"3e0ef05aaa17ebb0d0c28553a00f47683679d126162c91905327c84eca12db47"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.006729 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" event={"ID":"9579b457-bdfb-49b5-b12a-c14e87593922","Type":"ContainerStarted","Data":"bda8e1a90f3c584c8bf8d1c278166149fc068fe40c31dbb2f7c71f2cbe0356c7"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.006749 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.008173 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-42nbc" event={"ID":"22919f3b-67d1-4b9a-a0fd-476ba56e9918","Type":"ContainerStarted","Data":"c4ad5d75690c10d8a900e1b12401ea99326bac705ac3b76eb2944fc1d177b639"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.009627 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" event={"ID":"2ac8adae-3926-4197-a3ff-2efa92ef82b5","Type":"ContainerStarted","Data":"fcbb22ddbe175a8a16a51fb86fe83eed55ebec3dd09fba1dd34ceb94a1041bf0"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.010715 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-bflcx" event={"ID":"831a57b7-e553-4c53-a658-b10d4183d514","Type":"ContainerStarted","Data":"62261339630dfe60be92d070f395cafa7aea4039d259e062cc971e14e5b8ff8d"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.012308 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" event={"ID":"7455a54f-ad8c-4bbb-8b56-4e68848ff102","Type":"ContainerStarted","Data":"e42448e19bc3769557bf05a1f5b7e8b155e067af1a2e60f6b32b37ba7ecb7c83"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.013484 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-t5zc5" event={"ID":"c6566c85-0449-4bdc-b9ce-3fb02da651dd","Type":"ContainerStarted","Data":"275f4b23fb7e06bcb27ee6af4d58b62354eb7a055d75bc64e38eab78150f4ff5"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.015217 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" event={"ID":"2ffea777-f433-4e58-a670-8f39f3db893e","Type":"ContainerStarted","Data":"a53fda3a1615f3b24f670c41d3646f6af75c89236bd7b904207228255db8bf88"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.016829 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" event={"ID":"7bd56d38-cc74-420e-ab79-f16c8d36638f","Type":"ContainerStarted","Data":"0e1a88906edb02d7681ffae619b3c58c5b33cee30a1486271d784132ff2ac4ea"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.017806 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" event={"ID":"92684e7e-cfae-421b-b356-5cb5d6b38879","Type":"ContainerStarted","Data":"17955a747c0233252de328c8688c3561ff15330cfdc31b9f5f86d70dd07fcdfd"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.018924 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" event={"ID":"e93e70f9-30a0-4254-8cc9-2988b9028297","Type":"ContainerStarted","Data":"6b29bcdbe5e5901ae945b0f63d4517f25139d14ee62f34663e85f61fdeb28d36"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.026916 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" event={"ID":"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5","Type":"ContainerStarted","Data":"58bd049b58d7adeffe85775391f27e34494fbce697fc631c2b0337b3cbb542e8"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.026954 4940 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" event={"ID":"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5","Type":"ContainerStarted","Data":"9da0fadafb4fbf7432a491b13f9e2c7721a7bccd4b90935896e9892e52d56966"} Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.060147 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.067409 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-q4hnr" Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.072921 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-24dgg" Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.078851 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-qbt7f" Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.081271 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.081737 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.581719727 +0000 UTC m=+120.101861366 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.182782 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.182973 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.682936412 +0000 UTC m=+120.203078061 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.183157 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.183768 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.683752338 +0000 UTC m=+120.203893987 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.284936 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.285549 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.785529144 +0000 UTC m=+120.305670783 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.387102 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.387671 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.887648388 +0000 UTC m=+120.407790047 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.428194 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-5tcqw"] Nov 26 06:56:58 crc kubenswrapper[4940]: W1126 06:56:58.445666 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf6bef010_718c_468f_bc70_2424cd10e735.slice/crio-83cd923b885c7e96d9c7a624b41c02dab825a7a926e31e9b2da12a4960230083 WatchSource:0}: Error finding container 83cd923b885c7e96d9c7a624b41c02dab825a7a926e31e9b2da12a4960230083: Status 404 returned error can't find the container with id 83cd923b885c7e96d9c7a624b41c02dab825a7a926e31e9b2da12a4960230083 Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.491784 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.492114 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:58.992098898 +0000 UTC m=+120.512240517 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.548674 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx"] Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.567301 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-gwrr8" podStartSLOduration=98.567278794 podStartE2EDuration="1m38.567278794s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:58.566775368 +0000 UTC m=+120.086916977" watchObservedRunningTime="2025-11-26 06:56:58.567278794 +0000 UTC m=+120.087420413" Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.592884 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.593449 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.093436949 +0000 UTC m=+120.613578558 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.639113 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw"] Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.696905 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.698494 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.198470716 +0000 UTC m=+120.718612525 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.755502 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9"] Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.765735 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5"] Nov 26 06:56:58 crc kubenswrapper[4940]: W1126 06:56:58.803260 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode942613a_a952_4447_b337_ec6045783c9f.slice/crio-00e3a81e76a00b784bb6d4dfd4b746cca280b59b1ea632425021f36ac2ddb2c6 WatchSource:0}: Error finding container 00e3a81e76a00b784bb6d4dfd4b746cca280b59b1ea632425021f36ac2ddb2c6: Status 404 returned error can't find the container with id 00e3a81e76a00b784bb6d4dfd4b746cca280b59b1ea632425021f36ac2ddb2c6 Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.811030 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.811486 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.311471939 +0000 UTC m=+120.831613558 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.820119 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x"] Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.835674 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm"] Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.850770 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-sn6sg" podStartSLOduration=98.850749401 podStartE2EDuration="1m38.850749401s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:58.850025858 +0000 UTC m=+120.370167477" watchObservedRunningTime="2025-11-26 06:56:58.850749401 +0000 UTC m=+120.370891020" Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.870062 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll"] Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.902552 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b"] Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.912557 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:58 crc kubenswrapper[4940]: E1126 06:56:58.912980 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.412962954 +0000 UTC m=+120.933104573 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.913916 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv"] Nov 26 06:56:58 crc kubenswrapper[4940]: I1126 06:56:58.997053 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8"] Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.015079 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.015434 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.51542203 +0000 UTC m=+121.035563649 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.017670 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd"] Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.056395 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v"] Nov 26 06:56:59 crc kubenswrapper[4940]: W1126 06:56:59.057225 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01c54419_24df_4a02_9671_7457e0540ca7.slice/crio-b8dcdfae9ad64f5c7aa39a4ca71771f340fd0e82ce63414c68cb6f013117bef3 WatchSource:0}: Error finding container b8dcdfae9ad64f5c7aa39a4ca71771f340fd0e82ce63414c68cb6f013117bef3: Status 404 returned error can't find the container with id b8dcdfae9ad64f5c7aa39a4ca71771f340fd0e82ce63414c68cb6f013117bef3 Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.062344 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" event={"ID":"5d5ee928-c507-4716-9b87-3175c6a3ab44","Type":"ContainerStarted","Data":"e6865e43306a02df1a24ac1187aa80499d266013c7ba69ffd7fe0fd9f5afa269"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.063786 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" event={"ID":"c43a8a22-cd03-4112-96ab-e8a7bd2819ef","Type":"ContainerStarted","Data":"c9048a1685ea47d30a66a19f9e684524622043768bd134dc1b6ba51ff932334e"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.073423 4940 generic.go:334] "Generic (PLEG): container finished" podID="2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0" containerID="692b7f6b5a6fae85d510904f18bdb96e61928df38c229ed708243de614fd39f0" exitCode=0 Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.073662 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" event={"ID":"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0","Type":"ContainerDied","Data":"692b7f6b5a6fae85d510904f18bdb96e61928df38c229ed708243de614fd39f0"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.081308 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" event={"ID":"e93e70f9-30a0-4254-8cc9-2988b9028297","Type":"ContainerStarted","Data":"7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.081950 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.083942 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" event={"ID":"a3dddcb1-02be-4ac7-8da5-b83e8552a74f","Type":"ContainerStarted","Data":"d094111e34edeca4be8be648c8a19dd655f7b6db04b77aa7a46b241305661b53"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.086358 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" event={"ID":"5e8df8d5-008b-4af5-9b01-13bf857e1ac8","Type":"ContainerStarted","Data":"42b5b257d6e6c2b2fba60abc552fc5b684726825339b3998be17483fa2c7c8e8"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.098453 4940 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-8pf88 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.098508 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" podUID="e93e70f9-30a0-4254-8cc9-2988b9028297" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.098992 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" event={"ID":"7455a54f-ad8c-4bbb-8b56-4e68848ff102","Type":"ContainerStarted","Data":"38cd934d068122f768892583b9557706cbbbda932f7ad0a9ccf57839b508c1a5"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.103542 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" 
event={"ID":"2ac8adae-3926-4197-a3ff-2efa92ef82b5","Type":"ContainerStarted","Data":"10636b1c070222c8f559c427233f4749f0ccc464ae61ef93324d7e415b24fb2d"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.106089 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" event={"ID":"988ea40c-2af0-4145-af3b-42d26d0e94a2","Type":"ContainerStarted","Data":"e22115df2bee2cbd63627e743b844c2a9ae818982bbdb6f967ba37ff60761c53"} Nov 26 06:56:59 crc kubenswrapper[4940]: W1126 06:56:59.107146 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37976138_9ec7_4db0_943e_aaa3e84c96d6.slice/crio-845cd45a75d5e9e89aa7f3b1e3656e2d7d8db923a079463cc40156a6c40d1354 WatchSource:0}: Error finding container 845cd45a75d5e9e89aa7f3b1e3656e2d7d8db923a079463cc40156a6c40d1354: Status 404 returned error can't find the container with id 845cd45a75d5e9e89aa7f3b1e3656e2d7d8db923a079463cc40156a6c40d1354 Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.117387 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.118593 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.618576758 +0000 UTC m=+121.138718377 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.120627 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" event={"ID":"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0","Type":"ContainerStarted","Data":"7556bf1682d0386e34c038df486784924373652f799b180cca4fe8c06cbdbfb2"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.122650 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-sb94j"] Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.128312 4940 generic.go:334] "Generic (PLEG): container finished" podID="60b8d39a-f9d8-423e-9a88-2842a04d96a4" containerID="adcd3184aa9a736c1dd900c5f35de0f1cc54006db8600f2344c571e3b2741872" exitCode=0 Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.128388 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" event={"ID":"60b8d39a-f9d8-423e-9a88-2842a04d96a4","Type":"ContainerDied","Data":"adcd3184aa9a736c1dd900c5f35de0f1cc54006db8600f2344c571e3b2741872"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.137463 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" event={"ID":"02eb3fbf-9bcf-4097-80da-07430ae0cceb","Type":"ContainerStarted","Data":"8d99d28270dfeda5cf906bb4e2770b2b0711df78bd34bcecf0b6a03204e706f4"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.142853 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" event={"ID":"67c31318-36b0-451f-b849-8859adaebd3f","Type":"ContainerStarted","Data":"ef92f3a1b2806200f1dda286f56c7fe9f51867b188b7f46c052dcb0460f8061e"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.145977 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" event={"ID":"e942613a-a952-4447-b337-ec6045783c9f","Type":"ContainerStarted","Data":"00e3a81e76a00b784bb6d4dfd4b746cca280b59b1ea632425021f36ac2ddb2c6"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.179786 4940 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-2kv29 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" start-of-body= Nov 26 06:56:59 crc kubenswrapper[4940]: W1126 06:56:59.180307 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod647d3086_00d3_476f_879f_dd922b1c1313.slice/crio-ef3416e1b7ad4f9fad03b578ff3f080d13ebc70f143a3f4ccec525f01d8dfd31 WatchSource:0}: Error finding container ef3416e1b7ad4f9fad03b578ff3f080d13ebc70f143a3f4ccec525f01d8dfd31: Status 404 returned error can't find the container with id ef3416e1b7ad4f9fad03b578ff3f080d13ebc70f143a3f4ccec525f01d8dfd31 Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.180127 4940 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" podUID="7bd56d38-cc74-420e-ab79-f16c8d36638f" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.184992 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" event={"ID":"7bd56d38-cc74-420e-ab79-f16c8d36638f","Type":"ContainerStarted","Data":"477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.186653 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" event={"ID":"92684e7e-cfae-421b-b356-5cb5d6b38879","Type":"ContainerStarted","Data":"6fdf2551a975934069685161fd3588e8626403015888a45e8b7445816e39c753"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.186766 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.187657 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" event={"ID":"01aa8f48-cf86-43a7-874c-cc3cfddbef46","Type":"ContainerStarted","Data":"30747a2b4f1d61b904e3e2b84ef7f0a5cf277528f72b64c4e65b8ba723784ae3"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.193252 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" event={"ID":"f6bef010-718c-468f-bc70-2424cd10e735","Type":"ContainerStarted","Data":"83cd923b885c7e96d9c7a624b41c02dab825a7a926e31e9b2da12a4960230083"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.207211 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-t5zc5" event={"ID":"c6566c85-0449-4bdc-b9ce-3fb02da651dd","Type":"ContainerStarted","Data":"fb0b12b7a03d27896b6df1db945ab59624ada4ec2cea8cf4f02b0cbc14323491"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.208592 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-t5zc5" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.209553 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" event={"ID":"9579b457-bdfb-49b5-b12a-c14e87593922","Type":"ContainerStarted","Data":"95dbe11f5d169622c8a4ae8eb741e900b442c97cffcaea2ed77ddfd5fb5a6598"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.210925 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-42nbc" event={"ID":"22919f3b-67d1-4b9a-a0fd-476ba56e9918","Type":"ContainerStarted","Data":"439e6ed9f0ef7dad8de5ebdc70210aa763908688d09a6857c448e59bde27690f"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.214095 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-bflcx" event={"ID":"831a57b7-e553-4c53-a658-b10d4183d514","Type":"ContainerStarted","Data":"1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb"} Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.220533 4940 patch_prober.go:28] interesting pod/console-operator-58897d9998-t5zc5 container/console-operator 
namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.220584 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-t5zc5" podUID="c6566c85-0449-4bdc-b9ce-3fb02da651dd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.221179 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.224481 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.724464333 +0000 UTC m=+121.244605952 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.304415 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-qbt7f"] Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.321869 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.322027 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.822001103 +0000 UTC m=+121.342142722 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.322295 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.327288 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.827267171 +0000 UTC m=+121.347408990 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.356031 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-s22s9" podStartSLOduration=99.356014087 podStartE2EDuration="1m39.356014087s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:59.35576697 +0000 UTC m=+120.875908589" watchObservedRunningTime="2025-11-26 06:56:59.356014087 +0000 UTC m=+120.876155706" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.422606 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg"] Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.423457 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.423778 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:56:59.923762137 +0000 UTC m=+121.443903756 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.456846 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-24dgg"] Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.525536 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.526587 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" podStartSLOduration=99.526569535 podStartE2EDuration="1m39.526569535s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:59.526030317 +0000 UTC m=+121.046171956" watchObservedRunningTime="2025-11-26 06:56:59.526569535 +0000 UTC m=+121.046711154" Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.526729 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.026689247 +0000 UTC m=+121.546830856 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.545264 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9x9dq"] Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.550410 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q"] Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.560884 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-42nbc" podStartSLOduration=99.560868147 podStartE2EDuration="1m39.560868147s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:59.560525316 +0000 UTC m=+121.080666935" watchObservedRunningTime="2025-11-26 06:56:59.560868147 +0000 UTC m=+121.081009766" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.599715 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" podStartSLOduration=99.599696025 podStartE2EDuration="1m39.599696025s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:59.597545136 +0000 UTC m=+121.117686755" watchObservedRunningTime="2025-11-26 06:56:59.599696025 +0000 UTC m=+121.119837644" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.626203 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.626487 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.126463359 +0000 UTC m=+121.646604978 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.639329 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-t5zc5" podStartSLOduration=100.639311428 podStartE2EDuration="1m40.639311428s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:59.633776181 +0000 UTC m=+121.153917800" watchObservedRunningTime="2025-11-26 06:56:59.639311428 +0000 UTC m=+121.159453047" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.644610 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.648261 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.648330 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 26 06:56:59 crc kubenswrapper[4940]: W1126 06:56:59.659970 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3fc66d85_3d35_4198_9854_c26921a576a6.slice/crio-314d8ac94b4ca638e14388d2bda6109f214a80b90761e2b5a7d708c6268ccb7a WatchSource:0}: Error finding container 314d8ac94b4ca638e14388d2bda6109f214a80b90761e2b5a7d708c6268ccb7a: Status 404 returned error can't find the container with id 314d8ac94b4ca638e14388d2bda6109f214a80b90761e2b5a7d708c6268ccb7a Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.679923 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-jpvjs" podStartSLOduration=99.679907762 podStartE2EDuration="1m39.679907762s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:59.679096196 +0000 UTC m=+121.199237815" watchObservedRunningTime="2025-11-26 06:56:59.679907762 +0000 UTC m=+121.200049381" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.717499 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-bflcx" podStartSLOduration=100.717480469 podStartE2EDuration="1m40.717480469s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:56:59.716332623 +0000 UTC m=+121.236474242" 
watchObservedRunningTime="2025-11-26 06:56:59.717480469 +0000 UTC m=+121.237622088" Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.727750 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.728049 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.228022806 +0000 UTC m=+121.748164425 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.829018 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.829579 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.329563762 +0000 UTC m=+121.849705381 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:56:59 crc kubenswrapper[4940]: I1126 06:56:59.936617 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:56:59 crc kubenswrapper[4940]: E1126 06:56:59.937054 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.437024788 +0000 UTC m=+121.957166407 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.037745 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-s55tc" podStartSLOduration=100.037729928 podStartE2EDuration="1m40.037729928s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.035440135 +0000 UTC m=+121.555581754" watchObservedRunningTime="2025-11-26 06:57:00.037729928 +0000 UTC m=+121.557871547" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.040582 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.040694 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.540675042 +0000 UTC m=+122.060816661 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.041233 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.041769 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.541759617 +0000 UTC m=+122.061901236 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.145935 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.146649 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.646620519 +0000 UTC m=+122.166762138 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.187283 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-9f8vx" podStartSLOduration=100.187261825 podStartE2EDuration="1m40.187261825s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.134613846 +0000 UTC m=+121.654755475" watchObservedRunningTime="2025-11-26 06:57:00.187261825 +0000 UTC m=+121.707403444" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.250495 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.250942 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.750927725 +0000 UTC m=+122.271069344 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.256469 4940 generic.go:334] "Generic (PLEG): container finished" podID="67c31318-36b0-451f-b849-8859adaebd3f" containerID="9b0e77cfc11a9c05e473cf9905f7f4eee6dd9bf14bef9178347518b2ba575176" exitCode=0 Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.256542 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" event={"ID":"67c31318-36b0-451f-b849-8859adaebd3f","Type":"ContainerDied","Data":"9b0e77cfc11a9c05e473cf9905f7f4eee6dd9bf14bef9178347518b2ba575176"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.315178 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" event={"ID":"37976138-9ec7-4db0-943e-aaa3e84c96d6","Type":"ContainerStarted","Data":"845cd45a75d5e9e89aa7f3b1e3656e2d7d8db923a079463cc40156a6c40d1354"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.319727 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-24dgg" event={"ID":"e6611854-e2f1-449b-bcdd-fde4684968d5","Type":"ContainerStarted","Data":"6da12f8a8e96e5b381fbbb189b1ede34cd0ad77c5c60367dea55cd41ce0e73b9"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.323295 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" event={"ID":"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde","Type":"ContainerStarted","Data":"b633ffd5b47ba4fa17f2e3bf9eca22b91de215b480d454ef73f6794cdd089f9a"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.328701 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" event={"ID":"5d5ee928-c507-4716-9b87-3175c6a3ab44","Type":"ContainerStarted","Data":"5c7992cc6b30943d7e2456ceb570d789024708ca99ca4b211fede1dfbb7adc53"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.329297 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.336209 4940 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-5dkdx container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.337109 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" podUID="5d5ee928-c507-4716-9b87-3175c6a3ab44" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.350328 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" event={"ID":"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821","Type":"ContainerStarted","Data":"516ccb797b910018c3399c5e97fd4aebfe1aff592bcf520062137d315a4e8996"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.353338 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.353903 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.853877176 +0000 UTC m=+122.374018795 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.368067 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" podStartSLOduration=100.368051348 podStartE2EDuration="1m40.368051348s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.36656881 +0000 UTC m=+121.886710429" watchObservedRunningTime="2025-11-26 06:57:00.368051348 +0000 UTC m=+121.888192967" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.375480 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" event={"ID":"01aa8f48-cf86-43a7-874c-cc3cfddbef46","Type":"ContainerStarted","Data":"1493cf26b26d621594b150cb148ad3d9235f40ff6d5602e202edd26051c7c606"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.377378 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" event={"ID":"a3dddcb1-02be-4ac7-8da5-b83e8552a74f","Type":"ContainerStarted","Data":"fed77896048f2eccecbdf3f7e76f47eb8a9957afffafe8ca97797156fa1f4202"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.388985 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" event={"ID":"413a35f9-b30d-4825-92ea-ff4164212404","Type":"ContainerStarted","Data":"5438bbd3e168e86e168589c76369bd0d10fe3c4522c7706a14c8ceef45039978"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.389033 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" event={"ID":"413a35f9-b30d-4825-92ea-ff4164212404","Type":"ContainerStarted","Data":"0ef45e3dc6d2967d715a7ea2dfdc69579c6e04a52c54547051c54e99dda21293"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.401697 4940 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" event={"ID":"4befe9c4-9c54-4bea-9798-6b908273b93c","Type":"ContainerStarted","Data":"b5069e3de1377508e047c8d281269dfaf02b69e369b1c0ae51155c4881b7a39e"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.406355 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-5tcqw" podStartSLOduration=100.406340388 podStartE2EDuration="1m40.406340388s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.405802291 +0000 UTC m=+121.925943910" watchObservedRunningTime="2025-11-26 06:57:00.406340388 +0000 UTC m=+121.926482007" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.417348 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" event={"ID":"2ffea777-f433-4e58-a670-8f39f3db893e","Type":"ContainerStarted","Data":"9904f630662f400085060023b6e2e01db5d0a7ae045ba3a732e3a67bebcfb0df"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.424755 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" event={"ID":"434b74d5-9895-4254-8d8a-17fec36577ab","Type":"ContainerStarted","Data":"e9f2cd7af7f3000b1f019cf5a93e94f2ce5609871806d594839a3da030676025"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.424807 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" event={"ID":"434b74d5-9895-4254-8d8a-17fec36577ab","Type":"ContainerStarted","Data":"dcc0cf18400cc6dae0997a8591ce0429965d71d88aa904d20ea19873cea3215d"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.431882 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jk4hv" podStartSLOduration=100.431865562 podStartE2EDuration="1m40.431865562s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.430690165 +0000 UTC m=+121.950831784" watchObservedRunningTime="2025-11-26 06:57:00.431865562 +0000 UTC m=+121.952007181" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.432090 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" event={"ID":"988ea40c-2af0-4145-af3b-42d26d0e94a2","Type":"ContainerStarted","Data":"82d8ea0a5a12e707acb1d6074d06633173236b9588d866edc30da00c91cb7f95"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.447378 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" event={"ID":"5e8df8d5-008b-4af5-9b01-13bf857e1ac8","Type":"ContainerStarted","Data":"9e57f2be36a2cb227fd36c58d782e2edf121b7716f0d651d40e8f79bfce5e022"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.454537 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: 
\"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.455248 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:00.955211186 +0000 UTC m=+122.475352805 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.457941 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" event={"ID":"0b6a5b0d-3f28-4bc2-95a9-8bdc82ff2eb5","Type":"ContainerStarted","Data":"25df7994a4112b8c35860dd27285b45397b444729d94bb56bc3fab1ed4207e01"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.459581 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" podStartSLOduration=101.459569515 podStartE2EDuration="1m41.459569515s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.458653576 +0000 UTC m=+121.978795195" watchObservedRunningTime="2025-11-26 06:57:00.459569515 +0000 UTC m=+121.979711124" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.484468 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" event={"ID":"02eb3fbf-9bcf-4097-80da-07430ae0cceb","Type":"ContainerStarted","Data":"be7991bc3e3f1529562d4326c7356a7a1859958962ff3e3f6d2325aa7766ddec"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.485291 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.485844 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-92kwm" podStartSLOduration=101.485834963 podStartE2EDuration="1m41.485834963s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.485498502 +0000 UTC m=+122.005640121" watchObservedRunningTime="2025-11-26 06:57:00.485834963 +0000 UTC m=+122.005976582" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.494247 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-sb94j" event={"ID":"647d3086-00d3-476f-879f-dd922b1c1313","Type":"ContainerStarted","Data":"ef3416e1b7ad4f9fad03b578ff3f080d13ebc70f143a3f4ccec525f01d8dfd31"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.495350 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-sb94j" Nov 26 06:57:00 crc 
kubenswrapper[4940]: I1126 06:57:00.503564 4940 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-ft8qp container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.503855 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" podUID="02eb3fbf-9bcf-4097-80da-07430ae0cceb" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.504993 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" event={"ID":"2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0","Type":"ContainerStarted","Data":"88018b77719ac4cbb934f8f2c64e225111a745e6202ec7d229b5e1c8a949f89b"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.513153 4940 patch_prober.go:28] interesting pod/downloads-7954f5f757-sb94j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.513406 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sb94j" podUID="647d3086-00d3-476f-879f-dd922b1c1313" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.516868 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" event={"ID":"3fc66d85-3d35-4198-9854-c26921a576a6","Type":"ContainerStarted","Data":"314d8ac94b4ca638e14388d2bda6109f214a80b90761e2b5a7d708c6268ccb7a"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.524874 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" event={"ID":"bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0","Type":"ContainerStarted","Data":"c518b6310f72da4335b9a1ea8602a784c4d5772c6f7b8d79d6cf01a50ef3e6d0"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.525334 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.537287 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-qbt7f" event={"ID":"165dbb27-d422-4fe5-9ee0-a2f6b2cff11e","Type":"ContainerStarted","Data":"9f10e52cd1d7dc3495c7aeb6a51d2f34b1dd6c5fe8e44202e73c11eeece36726"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.547077 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-9qfvj" podStartSLOduration=100.547056454 podStartE2EDuration="1m40.547056454s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.520074694 +0000 UTC m=+122.040216343" watchObservedRunningTime="2025-11-26 06:57:00.547056454 +0000 UTC 
m=+122.067198073" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.549204 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-sb94j" podStartSLOduration=101.549194542 podStartE2EDuration="1m41.549194542s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.546115294 +0000 UTC m=+122.066256923" watchObservedRunningTime="2025-11-26 06:57:00.549194542 +0000 UTC m=+122.069336161" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.549636 4940 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wpzwc container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:5443/healthz\": dial tcp 10.217.0.24:5443: connect: connection refused" start-of-body= Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.549723 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" podUID="bfcb72ad-4dbb-438e-b4b0-6ffe78ff3ab0" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.24:5443/healthz\": dial tcp 10.217.0.24:5443: connect: connection refused" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.553936 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" event={"ID":"036446d1-5a1d-40f2-be92-7a8950c3efff","Type":"ContainerStarted","Data":"e24a43048d1805700edc2a7bfcaa655c2632549404569e2ed70ff5f58de1e838"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.556362 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.557759 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.057743985 +0000 UTC m=+122.577885604 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.566549 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" event={"ID":"01c54419-24df-4a02-9671-7457e0540ca7","Type":"ContainerStarted","Data":"67270a5a2ad2df90c4f31780434604518c62bb20820fa6e9138794e3508e1bf0"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.566600 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" event={"ID":"01c54419-24df-4a02-9671-7457e0540ca7","Type":"ContainerStarted","Data":"b8dcdfae9ad64f5c7aa39a4ca71771f340fd0e82ce63414c68cb6f013117bef3"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.590872 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" event={"ID":"c43a8a22-cd03-4112-96ab-e8a7bd2819ef","Type":"ContainerStarted","Data":"fbead104774f01ac5d3eea197b94ca06ac3392a312600aec42f4097e8dd9f503"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.594685 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" podStartSLOduration=100.594661992 podStartE2EDuration="1m40.594661992s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.587482813 +0000 UTC m=+122.107624432" watchObservedRunningTime="2025-11-26 06:57:00.594661992 +0000 UTC m=+122.114803611" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.599548 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" event={"ID":"2d74f8fa-d746-4920-ac46-e6a4a7a501f6","Type":"ContainerStarted","Data":"3d73fc0b4542bc15ecd5f59a2ec01fa07ad275a13009fb6d6ba1921ebfa060cd"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.599592 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" event={"ID":"2d74f8fa-d746-4920-ac46-e6a4a7a501f6","Type":"ContainerStarted","Data":"b32600b831b78c1bc0ee6997dcfa51b9e8aa3a7ce4aee6e8bd2469230ad5ac16"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.607351 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" podStartSLOduration=101.607327925 podStartE2EDuration="1m41.607327925s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.606970324 +0000 UTC m=+122.127111943" watchObservedRunningTime="2025-11-26 06:57:00.607327925 +0000 UTC m=+122.127469544" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.620879 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" event={"ID":"e942613a-a952-4447-b337-ec6045783c9f","Type":"ContainerStarted","Data":"ab68f3bc8fa93d6e020f9120b99fef028648b62663d259f088808a3edbf12527"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.635496 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-q4hnr" event={"ID":"0bd15561-d728-45b6-a33a-e9b3d0e8e5db","Type":"ContainerStarted","Data":"ded4358e3a8744743c402a793709d26fd9e566a5fabd42b454cbfd50668eef2d"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.635551 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-q4hnr" event={"ID":"0bd15561-d728-45b6-a33a-e9b3d0e8e5db","Type":"ContainerStarted","Data":"5fa0abc695c94b978a4f65bfba1f7ad0ed05e79cd18b220340bc3feea5311f62"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.639550 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gt48v" podStartSLOduration=100.639521882 podStartE2EDuration="1m40.639521882s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.63100008 +0000 UTC m=+122.151141699" watchObservedRunningTime="2025-11-26 06:57:00.639521882 +0000 UTC m=+122.159663501" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.648175 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" podStartSLOduration=100.648153147 podStartE2EDuration="1m40.648153147s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.647591249 +0000 UTC m=+122.167732868" watchObservedRunningTime="2025-11-26 06:57:00.648153147 +0000 UTC m=+122.168294766" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.649346 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" event={"ID":"f6bef010-718c-468f-bc70-2424cd10e735","Type":"ContainerStarted","Data":"4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650"} Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.649391 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.663704 4940 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-294qn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.663772 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" podUID="f6bef010-718c-468f-bc70-2424cd10e735" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.663844 4940 patch_prober.go:28] interesting 
pod/route-controller-manager-6576b87f9c-8pf88 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.663941 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" podUID="e93e70f9-30a0-4254-8cc9-2988b9028297" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.664495 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.669673 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.169651232 +0000 UTC m=+122.689792851 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.671830 4940 patch_prober.go:28] interesting pod/console-operator-58897d9998-t5zc5 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.671873 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-t5zc5" podUID="c6566c85-0449-4bdc-b9ce-3fb02da651dd" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.678136 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:00 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld Nov 26 06:57:00 crc kubenswrapper[4940]: [+]process-running ok Nov 26 06:57:00 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.678189 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:00 crc 
kubenswrapper[4940]: I1126 06:57:00.687825 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j7x6x" podStartSLOduration=100.687805461 podStartE2EDuration="1m40.687805461s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.68556603 +0000 UTC m=+122.205707649" watchObservedRunningTime="2025-11-26 06:57:00.687805461 +0000 UTC m=+122.207947080" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.758177 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-q4hnr" podStartSLOduration=6.758159464 podStartE2EDuration="6.758159464s" podCreationTimestamp="2025-11-26 06:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.756340975 +0000 UTC m=+122.276482594" watchObservedRunningTime="2025-11-26 06:57:00.758159464 +0000 UTC m=+122.278301083" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.760532 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-8dv5b" podStartSLOduration=100.760523019 podStartE2EDuration="1m40.760523019s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.71506853 +0000 UTC m=+122.235210139" watchObservedRunningTime="2025-11-26 06:57:00.760523019 +0000 UTC m=+122.280664638" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.765432 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.769685 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.269657149 +0000 UTC m=+122.789798938 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.837131 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-fw4tw" podStartSLOduration=100.837099119 podStartE2EDuration="1m40.837099119s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.801582268 +0000 UTC m=+122.321723887" watchObservedRunningTime="2025-11-26 06:57:00.837099119 +0000 UTC m=+122.357240738" Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.870354 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.870787 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.370772153 +0000 UTC m=+122.890913772 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.971954 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.972207 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.472177855 +0000 UTC m=+122.992319474 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:00 crc kubenswrapper[4940]: I1126 06:57:00.972626 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:00 crc kubenswrapper[4940]: E1126 06:57:00.972970 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.472959251 +0000 UTC m=+122.993100870 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.074383 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.074744 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.574727195 +0000 UTC m=+123.094868814 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.175581 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.175986 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.675969522 +0000 UTC m=+123.196111141 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.276551 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.276863 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.776812436 +0000 UTC m=+123.296954055 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.277097 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.277410 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.777397975 +0000 UTC m=+123.297539594 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.378418 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.378594 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.87856537 +0000 UTC m=+123.398706989 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.378649 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.379134 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.879127378 +0000 UTC m=+123.399268997 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.480392 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.480581 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.980555712 +0000 UTC m=+123.500697331 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.480711 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.481116 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:01.981109698 +0000 UTC m=+123.501251317 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.582067 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.582571 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.082522871 +0000 UTC m=+123.602664550 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.646195 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:01 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld Nov 26 06:57:01 crc kubenswrapper[4940]: [+]process-running ok Nov 26 06:57:01 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.646249 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.654463 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" event={"ID":"7e0fb7a9-3242-44dd-956d-f85a3f5f1cde","Type":"ContainerStarted","Data":"78029645a231fb3cf7bebdecaa4db66911aaf9d4867f58bb31c8b3fd7639fd3d"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.656336 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" event={"ID":"67c31318-36b0-451f-b849-8859adaebd3f","Type":"ContainerStarted","Data":"6b3f299f011448b21038e7ef7cffee1fc2b6037df2dcb552fade7b186a680cf3"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.657777 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" event={"ID":"3fc66d85-3d35-4198-9854-c26921a576a6","Type":"ContainerStarted","Data":"56216564a1e1702f73654806d94aadf6b08adceae5257a9f136cff51e04e5e95"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.657949 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.659100 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-sb94j" event={"ID":"647d3086-00d3-476f-879f-dd922b1c1313","Type":"ContainerStarted","Data":"ee9efafa188eaac4863357c118c9ef26be9a4893456b96f4a93058f403e54528"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.659454 4940 patch_prober.go:28] interesting pod/downloads-7954f5f757-sb94j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.659519 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sb94j" podUID="647d3086-00d3-476f-879f-dd922b1c1313" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" Nov 26 06:57:01 
crc kubenswrapper[4940]: I1126 06:57:01.660376 4940 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-mn29q container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body= Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.660401 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" podUID="3fc66d85-3d35-4198-9854-c26921a576a6" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.661264 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-qbt7f" event={"ID":"165dbb27-d422-4fe5-9ee0-a2f6b2cff11e","Type":"ContainerStarted","Data":"ee4947d922e7f04a13fb23c0f516bd8c9c165d80b657d0d7e573b515112c38dd"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.662822 4940 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-2kv29 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.15:6443/healthz\": context deadline exceeded" start-of-body= Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.662867 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" podUID="7bd56d38-cc74-420e-ab79-f16c8d36638f" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": context deadline exceeded" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.663239 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" event={"ID":"413a35f9-b30d-4825-92ea-ff4164212404","Type":"ContainerStarted","Data":"0c07388b26974c754db043f94f9b9786db015549dc17074afbc58460145d6a2f"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.663365 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.665322 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" event={"ID":"036446d1-5a1d-40f2-be92-7a8950c3efff","Type":"ContainerStarted","Data":"3bb910d8739760f77d65840dbcf70b234afeab9c232e44957938a8a44bb84087"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.665446 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" event={"ID":"036446d1-5a1d-40f2-be92-7a8950c3efff","Type":"ContainerStarted","Data":"1d308ea0d6887f243c2b352190c0c36f0b57aee330e8d048c7e08931ed95778b"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.667389 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" event={"ID":"a3dddcb1-02be-4ac7-8da5-b83e8552a74f","Type":"ContainerStarted","Data":"29158fe3eaa0f51a278fce5b1250bd64d532838bb94e6c4063d2576e498904c9"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.669212 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" 
event={"ID":"988ea40c-2af0-4145-af3b-42d26d0e94a2","Type":"ContainerStarted","Data":"b94b5780071ffac46aa0df5b8fa559951645a8f1214bc07f3e79ab912fb4b650"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.683603 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.686423 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" podStartSLOduration=101.686407453 podStartE2EDuration="1m41.686407453s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:00.849250917 +0000 UTC m=+122.369392536" watchObservedRunningTime="2025-11-26 06:57:01.686407453 +0000 UTC m=+123.206549072" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.688556 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4tvq7" podStartSLOduration=102.688549071 podStartE2EDuration="1m42.688549071s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.6853654 +0000 UTC m=+123.205507019" watchObservedRunningTime="2025-11-26 06:57:01.688549071 +0000 UTC m=+123.208690690" Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.689679 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.189660987 +0000 UTC m=+123.709802606 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.691324 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" event={"ID":"37976138-9ec7-4db0-943e-aaa3e84c96d6","Type":"ContainerStarted","Data":"ce88873fe9ae16dd30a7be8c9edc59ddabbe8fa80d345bb020fb577e639de0dd"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.691358 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" event={"ID":"37976138-9ec7-4db0-943e-aaa3e84c96d6","Type":"ContainerStarted","Data":"1365c70c3d2a405f8bc9de37722fc9bb9d461242fc79f01a2a7fc057f1917287"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.699205 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" event={"ID":"2ac8adae-3926-4197-a3ff-2efa92ef82b5","Type":"ContainerStarted","Data":"90ce546e996c1473db73fb663f18feeae6ab209156f0a7d278485f5a5e3f1676"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.703208 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" event={"ID":"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821","Type":"ContainerStarted","Data":"19e13249f3289ee0b9ea23bc9835bf6ad675bf8588b76d46278df254a4a83e07"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.703265 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" event={"ID":"bf6ea3c8-972d-4d8b-b0aa-058ceea6c821","Type":"ContainerStarted","Data":"7bed7a7ce5fd49c0bb68bb93b0423fe42f5608df100b559c7350a14122d66080"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.706598 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-24dgg" event={"ID":"e6611854-e2f1-449b-bcdd-fde4684968d5","Type":"ContainerStarted","Data":"edef6c4a7ed616ca850fa23ef7b073c41444f04da320e1dfdcfefa40faf3b306"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.706643 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-24dgg" event={"ID":"e6611854-e2f1-449b-bcdd-fde4684968d5","Type":"ContainerStarted","Data":"2e60eec0455eda624aeedc4a92ca0e7333ad2082fbb9c73754ecb595d953ed4e"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.707181 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-24dgg" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.709865 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" event={"ID":"60b8d39a-f9d8-423e-9a88-2842a04d96a4","Type":"ContainerStarted","Data":"2ceccec16c58335561bc9132f7b3538b2f4929210158af0fb336f7443e4f9852"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.709893 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" 
event={"ID":"60b8d39a-f9d8-423e-9a88-2842a04d96a4","Type":"ContainerStarted","Data":"14745233ab8e38a85b29817af37f57857be5361ed3f3267179169d175d45e244"} Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.711389 4940 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-294qn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.711432 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" podUID="f6bef010-718c-468f-bc70-2424cd10e735" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.713217 4940 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-ft8qp container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.713256 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" podUID="02eb3fbf-9bcf-4097-80da-07430ae0cceb" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.742050 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" podStartSLOduration=101.742015276 podStartE2EDuration="1m41.742015276s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.73962589 +0000 UTC m=+123.259767519" watchObservedRunningTime="2025-11-26 06:57:01.742015276 +0000 UTC m=+123.262156905" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.743668 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" podStartSLOduration=101.743659509 podStartE2EDuration="1m41.743659509s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.719791427 +0000 UTC m=+123.239933046" watchObservedRunningTime="2025-11-26 06:57:01.743659509 +0000 UTC m=+123.263801128" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.751216 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkdx" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.754752 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h62ll" podStartSLOduration=101.754736282 podStartE2EDuration="1m41.754736282s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-26 06:57:01.754131522 +0000 UTC m=+123.274273141" watchObservedRunningTime="2025-11-26 06:57:01.754736282 +0000 UTC m=+123.274877891" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.785160 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.788698 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.288678724 +0000 UTC m=+123.808820343 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.812152 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vf4n5" podStartSLOduration=101.812131871 podStartE2EDuration="1m41.812131871s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.787973961 +0000 UTC m=+123.308115580" watchObservedRunningTime="2025-11-26 06:57:01.812131871 +0000 UTC m=+123.332273490" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.813450 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-qbt7f" podStartSLOduration=7.813442623 podStartE2EDuration="7.813442623s" podCreationTimestamp="2025-11-26 06:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.811410348 +0000 UTC m=+123.331551967" watchObservedRunningTime="2025-11-26 06:57:01.813442623 +0000 UTC m=+123.333584242" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.844277 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.844616 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.845825 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6j7k9" podStartSLOduration=101.845805304 podStartE2EDuration="1m41.845805304s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.844756511 +0000 UTC m=+123.364898130" watchObservedRunningTime="2025-11-26 06:57:01.845805304 +0000 UTC 
m=+123.365946923" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.852658 4940 patch_prober.go:28] interesting pod/apiserver-76f77b778f-z9v5r container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.11:8443/livez\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.852720 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" podUID="60b8d39a-f9d8-423e-9a88-2842a04d96a4" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.11:8443/livez\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.854286 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.854779 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.863266 4940 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-55f84 container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.14:8443/livez\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.863619 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84" podUID="2aa8d709-23ae-46b0-bc3d-e3e66a81e7a0" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.14:8443/livez\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.876892 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" podStartSLOduration=102.876871105 podStartE2EDuration="1m42.876871105s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.873507677 +0000 UTC m=+123.393649296" watchObservedRunningTime="2025-11-26 06:57:01.876871105 +0000 UTC m=+123.397012724" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.888101 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.888484 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.388472735 +0000 UTC m=+123.908614354 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.902803 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r" podStartSLOduration=102.902780911 podStartE2EDuration="1m42.902780911s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.902090179 +0000 UTC m=+123.422231798" watchObservedRunningTime="2025-11-26 06:57:01.902780911 +0000 UTC m=+123.422922530" Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.989768 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.989944 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.489897678 +0000 UTC m=+124.010039307 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:01 crc kubenswrapper[4940]: I1126 06:57:01.990597 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:01 crc kubenswrapper[4940]: E1126 06:57:01.991010 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.490996032 +0000 UTC m=+124.011137651 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.006260 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.082958 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-58znw" podStartSLOduration=102.082936543 podStartE2EDuration="1m42.082936543s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:01.995330901 +0000 UTC m=+123.515472520" watchObservedRunningTime="2025-11-26 06:57:02.082936543 +0000 UTC m=+123.603078162" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.083404 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-cttzg" podStartSLOduration=102.083399358 podStartE2EDuration="1m42.083399358s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:02.072369836 +0000 UTC m=+123.592511455" watchObservedRunningTime="2025-11-26 06:57:02.083399358 +0000 UTC m=+123.603540977" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.092188 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.092559 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.59254435 +0000 UTC m=+124.112685969 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.193994 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.194391 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.694373766 +0000 UTC m=+124.214515575 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.267251 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z2dfd" podStartSLOduration=102.267236439 podStartE2EDuration="1m42.267236439s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:02.157256963 +0000 UTC m=+123.677398582" watchObservedRunningTime="2025-11-26 06:57:02.267236439 +0000 UTC m=+123.787378058" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.295141 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.295465 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.795449497 +0000 UTC m=+124.315591116 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.325457 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-24dgg" podStartSLOduration=8.325437024 podStartE2EDuration="8.325437024s" podCreationTimestamp="2025-11-26 06:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:02.322632695 +0000 UTC m=+123.842774304" watchObservedRunningTime="2025-11-26 06:57:02.325437024 +0000 UTC m=+123.845578633" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.396755 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.397193 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.89717349 +0000 UTC m=+124.417315309 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.444279 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wpzwc" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.498318 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.498614 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:02.998599694 +0000 UTC m=+124.518741313 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.600408 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.600783 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.10076523 +0000 UTC m=+124.620906839 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.646098 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:02 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld Nov 26 06:57:02 crc kubenswrapper[4940]: [+]process-running ok Nov 26 06:57:02 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.646167 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.701246 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.701457 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.201422829 +0000 UTC m=+124.721564448 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.701577 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.701904 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.201892224 +0000 UTC m=+124.722033843 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.714842 4940 generic.go:334] "Generic (PLEG): container finished" podID="434b74d5-9895-4254-8d8a-17fec36577ab" containerID="e9f2cd7af7f3000b1f019cf5a93e94f2ce5609871806d594839a3da030676025" exitCode=0 Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.714945 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" event={"ID":"434b74d5-9895-4254-8d8a-17fec36577ab","Type":"ContainerDied","Data":"e9f2cd7af7f3000b1f019cf5a93e94f2ce5609871806d594839a3da030676025"} Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.717092 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" event={"ID":"4befe9c4-9c54-4bea-9798-6b908273b93c","Type":"ContainerStarted","Data":"89bf87fec76ea5ba3f6ae38e05985a17069fc60364ca42310a38fb8fe6c74edb"} Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.717224 4940 patch_prober.go:28] interesting pod/downloads-7954f5f757-sb94j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.717273 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sb94j" podUID="647d3086-00d3-476f-879f-dd922b1c1313" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.719259 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 
06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.723739 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-mn29q" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.760604 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.761473 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.767440 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.782980 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.783637 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.802637 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.802788 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.302767069 +0000 UTC m=+124.822908698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.803335 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.805965 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.305939771 +0000 UTC m=+124.826081390 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.904610 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.904811 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:02 crc kubenswrapper[4940]: I1126 06:57:02.904871 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:02 crc kubenswrapper[4940]: E1126 06:57:02.905131 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.40503564 +0000 UTC m=+124.925177259 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.006813 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.006854 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.006896 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.007359 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.50733341 +0000 UTC m=+125.027475029 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.007567 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.028788 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.082391 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.108272 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.108452 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.608415843 +0000 UTC m=+125.128557462 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.108803 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.109129 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.609121525 +0000 UTC m=+125.129263144 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.210047 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.210136 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.710117535 +0000 UTC m=+125.230259154 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.210448 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.210746 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.710738085 +0000 UTC m=+125.230879704 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.311899 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.312228 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.812212669 +0000 UTC m=+125.332354288 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.413725 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.414300 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:03.914287413 +0000 UTC m=+125.434429032 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.427154 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c9xgx"] Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.429990 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.441355 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.462221 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c9xgx"] Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.522467 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.522754 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-utilities\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.522795 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-catalog-content\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.522817 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhqlh\" (UniqueName: \"kubernetes.io/projected/358bc8db-3143-435e-8d21-00fa78fa3029-kube-api-access-vhqlh\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.522908 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.022893975 +0000 UTC m=+125.543035594 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.585252 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n8sjf"] Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.586189 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.617940 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n8sjf"] Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.618084 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.624131 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-utilities\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.624173 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.624194 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-catalog-content\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.624224 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhqlh\" (UniqueName: \"kubernetes.io/projected/358bc8db-3143-435e-8d21-00fa78fa3029-kube-api-access-vhqlh\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.625070 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-utilities\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.625416 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.125405873 +0000 UTC m=+125.645547492 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.625743 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-catalog-content\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.664102 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:03 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld Nov 26 06:57:03 crc kubenswrapper[4940]: [+]process-running ok Nov 26 06:57:03 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.664152 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.694719 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhqlh\" (UniqueName: \"kubernetes.io/projected/358bc8db-3143-435e-8d21-00fa78fa3029-kube-api-access-vhqlh\") pod \"certified-operators-c9xgx\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.731165 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.731372 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-utilities\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.731392 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzgfb\" (UniqueName: \"kubernetes.io/projected/1927422c-3af6-418b-ba25-2cbecefd45ad-kube-api-access-qzgfb\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.731645 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-catalog-content\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.731827 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.231800844 +0000 UTC m=+125.751942653 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.781968 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.783191 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" event={"ID":"4befe9c4-9c54-4bea-9798-6b908273b93c","Type":"ContainerStarted","Data":"9b6c9ed8c10029575f7e827d6455a3b0d026a577bb76d5606f5eb9ecf2c64fac"} Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.783221 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" event={"ID":"4befe9c4-9c54-4bea-9798-6b908273b93c","Type":"ContainerStarted","Data":"7cac72d52813de1a68a5ae15caa09c0c2bdf0c44539cb7d453be8a9d59eab59e"} Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.798203 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4n6pd"] Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.799491 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.805341 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4n6pd"] Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.824233 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.834693 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-utilities\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.834731 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzgfb\" (UniqueName: \"kubernetes.io/projected/1927422c-3af6-418b-ba25-2cbecefd45ad-kube-api-access-qzgfb\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.834766 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.834838 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-catalog-content\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.835361 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-catalog-content\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.835650 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-utilities\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.836218 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.336203983 +0000 UTC m=+125.856345602 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.869782 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzgfb\" (UniqueName: \"kubernetes.io/projected/1927422c-3af6-418b-ba25-2cbecefd45ad-kube-api-access-qzgfb\") pod \"community-operators-n8sjf\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.938662 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.938955 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnrdq\" (UniqueName: \"kubernetes.io/projected/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-kube-api-access-mnrdq\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.939155 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-catalog-content\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.939249 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-utilities\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:03 crc kubenswrapper[4940]: E1126 06:57:03.940401 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.440373793 +0000 UTC m=+125.960515572 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:03 crc kubenswrapper[4940]: I1126 06:57:03.981295 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.001053 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vr7rb"] Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.002085 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.040432 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-catalog-content\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.040831 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.040881 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-utilities\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.040948 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnrdq\" (UniqueName: \"kubernetes.io/projected/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-kube-api-access-mnrdq\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.041493 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.541481677 +0000 UTC m=+126.061623296 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.041507 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-catalog-content\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.041804 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-utilities\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.048972 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vr7rb"] Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.061673 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4bs4k" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.149640 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.149976 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-utilities\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.150069 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz8bd\" (UniqueName: \"kubernetes.io/projected/64de0c82-8255-4813-a615-e10e81aeede1-kube-api-access-gz8bd\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.150121 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-catalog-content\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.150203 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-26 06:57:04.650175241 +0000 UTC m=+126.170316860 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.150283 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.150657 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.650650036 +0000 UTC m=+126.170791655 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.155567 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnrdq\" (UniqueName: \"kubernetes.io/projected/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-kube-api-access-mnrdq\") pod \"certified-operators-4n6pd\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " pod="openshift-marketplace/certified-operators-4n6pd"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.253732 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.254017 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-utilities\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.254106 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz8bd\" (UniqueName: \"kubernetes.io/projected/64de0c82-8255-4813-a615-e10e81aeede1-kube-api-access-gz8bd\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.254145 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-catalog-content\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.254926 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-catalog-content\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb"
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.254994 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.754978402 +0000 UTC m=+126.275120021 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.255254 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-utilities\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.296816 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz8bd\" (UniqueName: \"kubernetes.io/projected/64de0c82-8255-4813-a615-e10e81aeede1-kube-api-access-gz8bd\") pod \"community-operators-vr7rb\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " pod="openshift-marketplace/community-operators-vr7rb"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.336382 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.356496 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.357792 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.857779319 +0000 UTC m=+126.377920938 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.363846 4940 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.400335 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vr7rb"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.449847 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4n6pd"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.460466 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/434b74d5-9895-4254-8d8a-17fec36577ab-config-volume\") pod \"434b74d5-9895-4254-8d8a-17fec36577ab\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") "
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.460609 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.460637 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/434b74d5-9895-4254-8d8a-17fec36577ab-secret-volume\") pod \"434b74d5-9895-4254-8d8a-17fec36577ab\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") "
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.460684 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pq29\" (UniqueName: \"kubernetes.io/projected/434b74d5-9895-4254-8d8a-17fec36577ab-kube-api-access-2pq29\") pod \"434b74d5-9895-4254-8d8a-17fec36577ab\" (UID: \"434b74d5-9895-4254-8d8a-17fec36577ab\") "
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.464786 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:04.96476353 +0000 UTC m=+126.484905149 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.464929 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/434b74d5-9895-4254-8d8a-17fec36577ab-config-volume" (OuterVolumeSpecName: "config-volume") pod "434b74d5-9895-4254-8d8a-17fec36577ab" (UID: "434b74d5-9895-4254-8d8a-17fec36577ab"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.479592 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/434b74d5-9895-4254-8d8a-17fec36577ab-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "434b74d5-9895-4254-8d8a-17fec36577ab" (UID: "434b74d5-9895-4254-8d8a-17fec36577ab"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.480075 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/434b74d5-9895-4254-8d8a-17fec36577ab-kube-api-access-2pq29" (OuterVolumeSpecName: "kube-api-access-2pq29") pod "434b74d5-9895-4254-8d8a-17fec36577ab" (UID: "434b74d5-9895-4254-8d8a-17fec36577ab"). InnerVolumeSpecName "kube-api-access-2pq29". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.561775 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.561909 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/434b74d5-9895-4254-8d8a-17fec36577ab-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.561920 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/434b74d5-9895-4254-8d8a-17fec36577ab-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.561929 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pq29\" (UniqueName: \"kubernetes.io/projected/434b74d5-9895-4254-8d8a-17fec36577ab-kube-api-access-2pq29\") on node \"crc\" DevicePath \"\""
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.562314 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.062299209 +0000 UTC m=+126.582440828 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
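The retry storm above is kubelet's nestedpendingoperations backoff at work: every failed MountDevice/TearDown arms a 500ms "durationBeforeRetry" gate for that volume, and each attempt fails the same way because the kubevirt.io.hostpath-provisioner driver has not yet registered with this kubelet. One way to watch for the registration from outside the node is to poll the node's CSINode object, the API-visible counterpart of kubelet's in-memory driver list. The sketch below does that with client-go; the standalone diagnostic and the KUBECONFIG-based config loading are assumptions for illustration, not part of this cluster's tooling.

// csinodecheck.go: report whether a CSI driver name appears in a node's
// CSINode object -- the API reflection of the registry behind the
// "not found in the list of registered CSI drivers" errors above.
package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Node and driver names are taken from the log above.
	const node, driver = "crc", "kubevirt.io.hostpath-provisioner"

	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	csiNode, err := cs.StorageV1().CSINodes().Get(context.Background(), node, metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range csiNode.Spec.Drivers {
		if d.Name == driver {
			fmt.Printf("driver %s registered on %s (node ID %q)\n", driver, node, d.NodeID)
			return
		}
	}
	fmt.Printf("driver %s not yet registered on %s\n", driver, node)
}

Run in a loop, this flips from "not yet registered" to "registered" at the same moment the csi_plugin.go registration lines appear further down in this log.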
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.654225 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 06:57:04 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld
Nov 26 06:57:04 crc kubenswrapper[4940]: [+]process-running ok
Nov 26 06:57:04 crc kubenswrapper[4940]: healthz check failed
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.654277 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.662737 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.663051 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.163017119 +0000 UTC m=+126.683158738 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.663160 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.663567 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.163559577 +0000 UTC m=+126.683701196 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.764876 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.765372 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.265353311 +0000 UTC m=+126.785494930 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.781911 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n8sjf"]
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.860095 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" event={"ID":"4befe9c4-9c54-4bea-9798-6b908273b93c","Type":"ContainerStarted","Data":"20c8b5dbb5084ad61b95e4345e55be219f719de14f1db6ca7cb8c78582514ce7"}
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.864401 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c9xgx"]
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.867894 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.868261 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.368249801 +0000 UTC m=+126.888391420 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.875263 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8" event={"ID":"434b74d5-9895-4254-8d8a-17fec36577ab","Type":"ContainerDied","Data":"dcc0cf18400cc6dae0997a8591ce0429965d71d88aa904d20ea19873cea3215d"}
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.875397 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dcc0cf18400cc6dae0997a8591ce0429965d71d88aa904d20ea19873cea3215d"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.875711 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.885228 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736","Type":"ContainerStarted","Data":"1b65917726aa0b80669a520544d6841c365e4a0bdbfccc00df22dc8549048973"}
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.927019 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-9x9dq" podStartSLOduration=10.927005225 podStartE2EDuration="10.927005225s" podCreationTimestamp="2025-11-26 06:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:04.924822024 +0000 UTC m=+126.444963643" watchObservedRunningTime="2025-11-26 06:57:04.927005225 +0000 UTC m=+126.447146844"
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.968897 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.972433 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.472401631 +0000 UTC m=+126.992543250 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:04 crc kubenswrapper[4940]: I1126 06:57:04.972725 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:04 crc kubenswrapper[4940]: E1126 06:57:04.973007 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.47300088 +0000 UTC m=+126.993142499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.077174 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:57:05 crc kubenswrapper[4940]: E1126 06:57:05.077657 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.577634336 +0000 UTC m=+127.097775955 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.080349 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4n6pd"]
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.137983 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vr7rb"]
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.180298 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:05 crc kubenswrapper[4940]: E1126 06:57:05.181170 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.681072333 +0000 UTC m=+127.201213952 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.282186 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:57:05 crc kubenswrapper[4940]: E1126 06:57:05.282540 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.782518357 +0000 UTC m=+127.302659976 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.282823 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:05 crc kubenswrapper[4940]: E1126 06:57:05.283230 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:57:05.783215719 +0000 UTC m=+127.303357338 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sv7gt" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.342875 4940 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-26T06:57:04.363870143Z","Handler":null,"Name":""}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.345649 4940 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.345681 4940 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.383834 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.397244 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
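The reconciler.go and csi_plugin.go lines just above are the tail end of kubelet's plugin-registration handshake: the driver's registrar wrote a socket under /var/lib/kubelet/plugins_registry/, the plugin watcher picked it up (the plugin_watcher.go line at 06:57:04.363846 earlier), kubelet dialed the socket, called GetInfo, validated the advertised version 1.0.0, and added the driver to its registry, which is what finally lets the stuck volume operations succeed. The sketch below shows the driver-side half of that handshake against k8s.io/kubelet/pkg/apis/pluginregistration/v1; it illustrates the protocol only and is not the hostpath provisioner's actual registrar (in practice this role is played by a node-driver-registrar sidecar).

// regserver.go: a minimal driver-side registration server that would produce
// the "Trying to validate a new CSI Driver ... versions: 1.0.0" handshake
// above. Socket path, driver name and endpoint are copied from the log;
// error handling is trimmed for brevity.
package main

import (
	"context"
	"net"

	"google.golang.org/grpc"
	registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
)

type regServer struct{}

// GetInfo is called by kubelet after it spots the socket under plugins_registry.
func (regServer) GetInfo(ctx context.Context, r *registerapi.InfoRequest) (*registerapi.PluginInfo, error) {
	return &registerapi.PluginInfo{
		Type:              registerapi.CSIPlugin,
		Name:              "kubevirt.io.hostpath-provisioner",
		Endpoint:          "/var/lib/kubelet/plugins/csi-hostpath/csi.sock",
		SupportedVersions: []string{"1.0.0"}, // matches "versions: 1.0.0" in the log
	}, nil
}

// NotifyRegistrationStatus receives kubelet's accept/reject verdict.
func (regServer) NotifyRegistrationStatus(ctx context.Context, s *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) {
	return &registerapi.RegistrationStatusResponse{}, nil
}

func main() {
	l, err := net.Listen("unix", "/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	registerapi.RegisterRegistrationServer(srv, regServer{})
	_ = srv.Serve(l)
}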
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.484991 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.487729 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.487769 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.534831 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sv7gt\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") " pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.579026 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nn64p"] Nov 26 06:57:05 crc kubenswrapper[4940]: E1126 06:57:05.579595 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="434b74d5-9895-4254-8d8a-17fec36577ab" containerName="collect-profiles" Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.579678 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="434b74d5-9895-4254-8d8a-17fec36577ab" containerName="collect-profiles" Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.579927 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="434b74d5-9895-4254-8d8a-17fec36577ab" containerName="collect-profiles" Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.581140 4940 util.go:30] "No sandbox for pod can be found. 
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.585789 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.641800 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nn64p"]
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.647506 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 06:57:05 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld
Nov 26 06:57:05 crc kubenswrapper[4940]: [+]process-running ok
Nov 26 06:57:05 crc kubenswrapper[4940]: healthz check failed
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.647865 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.687463 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbgb4\" (UniqueName: \"kubernetes.io/projected/965f184e-b9c1-4f5a-a51e-c5e36466002d-kube-api-access-bbgb4\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.687529 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-utilities\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.687618 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-catalog-content\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.789215 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbgb4\" (UniqueName: \"kubernetes.io/projected/965f184e-b9c1-4f5a-a51e-c5e36466002d-kube-api-access-bbgb4\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.789267 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-utilities\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.789317 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-catalog-content\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.789885 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-catalog-content\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.790533 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-utilities\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.814992 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbgb4\" (UniqueName: \"kubernetes.io/projected/965f184e-b9c1-4f5a-a51e-c5e36466002d-kube-api-access-bbgb4\") pod \"redhat-marketplace-nn64p\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.838723 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.847344 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.895465 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nn64p"
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.909834 4940 generic.go:334] "Generic (PLEG): container finished" podID="e653739e-f6bf-4bb4-9b62-0d7d5ffb0736" containerID="f54edd7dac9843b53e5345a5d4af751e7860bf05c79ea530ad467e7110812fff" exitCode=0
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.910187 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736","Type":"ContainerDied","Data":"f54edd7dac9843b53e5345a5d4af751e7860bf05c79ea530ad467e7110812fff"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.912110 4940 generic.go:334] "Generic (PLEG): container finished" podID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerID="ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4" exitCode=0
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.912172 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n6pd" event={"ID":"5263e4cf-d56e-46cf-bc5b-dfcad517fb81","Type":"ContainerDied","Data":"ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.912199 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n6pd" event={"ID":"5263e4cf-d56e-46cf-bc5b-dfcad517fb81","Type":"ContainerStarted","Data":"35d937d96e7a6bd00fd17c3c6314fabb6378686895334e833399e3d3dc1414e4"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.916220 4940 generic.go:334] "Generic (PLEG): container finished" podID="358bc8db-3143-435e-8d21-00fa78fa3029" containerID="9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313" exitCode=0
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.916251 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9xgx" event={"ID":"358bc8db-3143-435e-8d21-00fa78fa3029","Type":"ContainerDied","Data":"9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.916314 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9xgx" event={"ID":"358bc8db-3143-435e-8d21-00fa78fa3029","Type":"ContainerStarted","Data":"7032b5fe8da6019e36bdb8d2d9b3ed0a607726f3bf61c7b8761f5189bc0bec6b"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.919536 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.925461 4940 generic.go:334] "Generic (PLEG): container finished" podID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerID="6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438" exitCode=0
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.925831 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n8sjf" event={"ID":"1927422c-3af6-418b-ba25-2cbecefd45ad","Type":"ContainerDied","Data":"6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.925861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n8sjf" event={"ID":"1927422c-3af6-418b-ba25-2cbecefd45ad","Type":"ContainerStarted","Data":"45d3336f5556f20cb62d0942a1ad7c514b563d2bb6a400b0755dd2ec5217a475"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.929641 4940 generic.go:334] "Generic (PLEG): container finished" podID="64de0c82-8255-4813-a615-e10e81aeede1" containerID="d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3" exitCode=0
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.930479 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vr7rb" event={"ID":"64de0c82-8255-4813-a615-e10e81aeede1","Type":"ContainerDied","Data":"d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.930513 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vr7rb" event={"ID":"64de0c82-8255-4813-a615-e10e81aeede1","Type":"ContainerStarted","Data":"90c1591f5cd9e9b968ff9f7bb5367283ec5428836d9a3da14ab6f41914b8393f"}
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.987872 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p7sfk"]
Nov 26 06:57:05 crc kubenswrapper[4940]: I1126 06:57:05.994977 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.006567 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p7sfk"]
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.097786 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-catalog-content\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.097909 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-utilities\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.097973 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjjhd\" (UniqueName: \"kubernetes.io/projected/7b1f2008-3b1a-4665-8192-64c225577aea-kube-api-access-gjjhd\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.162346 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sv7gt"]
Nov 26 06:57:06 crc kubenswrapper[4940]: W1126 06:57:06.167340 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podefe355d7_3ba1_451e_aebd_271c367e186c.slice/crio-2e58b7e9eb9c2e01c234e4bdac8839b7d91e1215c43eccf6ee91b0a1cf48c07f WatchSource:0}: Error finding container 2e58b7e9eb9c2e01c234e4bdac8839b7d91e1215c43eccf6ee91b0a1cf48c07f: Status 404 returned error can't find the container with id 2e58b7e9eb9c2e01c234e4bdac8839b7d91e1215c43eccf6ee91b0a1cf48c07f
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.204932 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-utilities\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.205032 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjjhd\" (UniqueName: \"kubernetes.io/projected/7b1f2008-3b1a-4665-8192-64c225577aea-kube-api-access-gjjhd\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.205154 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-catalog-content\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.205598 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-catalog-content\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.205666 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-utilities\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.209829 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nn64p"]
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.225748 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjjhd\" (UniqueName: \"kubernetes.io/projected/7b1f2008-3b1a-4665-8192-64c225577aea-kube-api-access-gjjhd\") pod \"redhat-marketplace-p7sfk\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: W1126 06:57:06.232219 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod965f184e_b9c1_4f5a_a51e_c5e36466002d.slice/crio-9673054b3faa945a2fc78d894c6833256b301cb1b110d6b1f425a61e0dffacfc WatchSource:0}: Error finding container 9673054b3faa945a2fc78d894c6833256b301cb1b110d6b1f425a61e0dffacfc: Status 404 returned error can't find the container with id 9673054b3faa945a2fc78d894c6833256b301cb1b110d6b1f425a61e0dffacfc
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.321788 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p7sfk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.539003 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p7sfk"]
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.572365 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xkfvk"]
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.573793 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.575665 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.586826 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xkfvk"]
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.645125 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 06:57:06 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld
Nov 26 06:57:06 crc kubenswrapper[4940]: [+]process-running ok
Nov 26 06:57:06 crc kubenswrapper[4940]: healthz check failed
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.645185 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.713498 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-catalog-content\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.713645 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-utilities\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.713688 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qsc9\" (UniqueName: \"kubernetes.io/projected/9fd9ac3b-989c-49b5-b377-e63aef7fd979-kube-api-access-5qsc9\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.815256 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-utilities\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.815335 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qsc9\" (UniqueName: \"kubernetes.io/projected/9fd9ac3b-989c-49b5-b377-e63aef7fd979-kube-api-access-5qsc9\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.815435 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-catalog-content\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.815798 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-utilities\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.815989 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-catalog-content\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.829898 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-t5zc5"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.834171 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qsc9\" (UniqueName: \"kubernetes.io/projected/9fd9ac3b-989c-49b5-b377-e63aef7fd979-kube-api-access-5qsc9\") pod \"redhat-operators-xkfvk\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.860479 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.864215 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.866552 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-z9v5r"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.871392 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-55f84"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.941322 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" event={"ID":"efe355d7-3ba1-451e-aebd-271c367e186c","Type":"ContainerStarted","Data":"a876ed16c09326ce9b2409438a1818e2d5dec88020d8fc0cfaa071b6f74bede0"}
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.941366 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" event={"ID":"efe355d7-3ba1-451e-aebd-271c367e186c","Type":"ContainerStarted","Data":"2e58b7e9eb9c2e01c234e4bdac8839b7d91e1215c43eccf6ee91b0a1cf48c07f"}
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.941414 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.959320 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xkfvk"
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.982272 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p7sfk" event={"ID":"7b1f2008-3b1a-4665-8192-64c225577aea","Type":"ContainerStarted","Data":"4bad84f7c923d3acc366a0df54344e2581f42f1b8f66ca9aede2f87df681eb74"}
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.989971 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p856r"]
Nov 26 06:57:06 crc kubenswrapper[4940]: I1126 06:57:06.992792 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:06.996871 4940 generic.go:334] "Generic (PLEG): container finished" podID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerID="87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02" exitCode=0
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:06.997554 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nn64p" event={"ID":"965f184e-b9c1-4f5a-a51e-c5e36466002d","Type":"ContainerDied","Data":"87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02"}
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:06.997620 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nn64p" event={"ID":"965f184e-b9c1-4f5a-a51e-c5e36466002d","Type":"ContainerStarted","Data":"9673054b3faa945a2fc78d894c6833256b301cb1b110d6b1f425a61e0dffacfc"}
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.007494 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p856r"]
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.011978 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" podStartSLOduration=107.011956647 podStartE2EDuration="1m47.011956647s" podCreationTimestamp="2025-11-26 06:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:07.008487256 +0000 UTC m=+128.528628895" watchObservedRunningTime="2025-11-26 06:57:07.011956647 +0000 UTC m=+128.532098266"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.018386 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-bflcx"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.018417 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-bflcx"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.041661 4940 patch_prober.go:28] interesting pod/console-f9d7485db-bflcx container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.5:8443/health\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.041878 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-bflcx" podUID="831a57b7-e553-4c53-a658-b10d4183d514" containerName="console" probeResult="failure" output="Get \"https://10.217.0.5:8443/health\": dial tcp 10.217.0.5:8443: connect: connection refused"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.080588 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.106852 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-294qn"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.157106 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bb2l8\" (UniqueName: \"kubernetes.io/projected/245d45a4-2775-4470-a35b-92b1c870fd35-kube-api-access-bb2l8\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.157292 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-utilities\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.157709 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-catalog-content\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.198463 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.198939 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.259758 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-utilities\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.259960 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-catalog-content\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.260029 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bb2l8\" (UniqueName: \"kubernetes.io/projected/245d45a4-2775-4470-a35b-92b1c870fd35-kube-api-access-bb2l8\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.261317 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-catalog-content\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.262658 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-utilities\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.308734 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.309651 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.312401 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.313058 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.326738 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.348436 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bb2l8\" (UniqueName: \"kubernetes.io/projected/245d45a4-2775-4470-a35b-92b1c870fd35-kube-api-access-bb2l8\") pod \"redhat-operators-p856r\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.356425 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p856r"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.464798 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.465106 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.508228 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.543095 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xkfvk"]
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.567157 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kubelet-dir\") pod \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\" (UID: \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\") "
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.567319 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kube-api-access\") pod \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\" (UID: \"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736\") "
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.567417 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e653739e-f6bf-4bb4-9b62-0d7d5ffb0736" (UID: "e653739e-f6bf-4bb4-9b62-0d7d5ffb0736"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.567956 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.568105 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.568229 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.568283 4940 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.572425 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e653739e-f6bf-4bb4-9b62-0d7d5ffb0736" (UID: "e653739e-f6bf-4bb4-9b62-0d7d5ffb0736"). InnerVolumeSpecName "kube-api-access".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.585517 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.643344 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.646072 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:07 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld Nov 26 06:57:07 crc kubenswrapper[4940]: [+]process-running ok Nov 26 06:57:07 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.646116 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.662413 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.670392 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e653739e-f6bf-4bb4-9b62-0d7d5ffb0736-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.706015 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p856r"] Nov 26 06:57:07 crc kubenswrapper[4940]: W1126 06:57:07.736292 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod245d45a4_2775_4470_a35b_92b1c870fd35.slice/crio-7dc5e0f17e386a0adbf999c801d38ba18c548ec3b291e4418bb3fbe59b9323b5 WatchSource:0}: Error finding container 7dc5e0f17e386a0adbf999c801d38ba18c548ec3b291e4418bb3fbe59b9323b5: Status 404 returned error can't find the container with id 7dc5e0f17e386a0adbf999c801d38ba18c548ec3b291e4418bb3fbe59b9323b5 Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.929816 4940 patch_prober.go:28] interesting pod/downloads-7954f5f757-sb94j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.929865 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-sb94j" podUID="647d3086-00d3-476f-879f-dd922b1c1313" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.929942 4940 patch_prober.go:28] interesting pod/downloads-7954f5f757-sb94j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get 
\"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.929972 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sb94j" podUID="647d3086-00d3-476f-879f-dd922b1c1313" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" Nov 26 06:57:07 crc kubenswrapper[4940]: I1126 06:57:07.949359 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 06:57:07 crc kubenswrapper[4940]: W1126 06:57:07.967745 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod4d6b3bdc_1843_46fd_adc6_e697f613d9ac.slice/crio-2d8151a187f7c7178ec1e97158964f634228c9a35f67bf315d0630d7119fabda WatchSource:0}: Error finding container 2d8151a187f7c7178ec1e97158964f634228c9a35f67bf315d0630d7119fabda: Status 404 returned error can't find the container with id 2d8151a187f7c7178ec1e97158964f634228c9a35f67bf315d0630d7119fabda Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.010939 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.010957 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e653739e-f6bf-4bb4-9b62-0d7d5ffb0736","Type":"ContainerDied","Data":"1b65917726aa0b80669a520544d6841c365e4a0bdbfccc00df22dc8549048973"} Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.011000 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b65917726aa0b80669a520544d6841c365e4a0bdbfccc00df22dc8549048973" Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.014128 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p856r" event={"ID":"245d45a4-2775-4470-a35b-92b1c870fd35","Type":"ContainerStarted","Data":"94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78"} Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.014164 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p856r" event={"ID":"245d45a4-2775-4470-a35b-92b1c870fd35","Type":"ContainerStarted","Data":"7dc5e0f17e386a0adbf999c801d38ba18c548ec3b291e4418bb3fbe59b9323b5"} Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.016511 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4d6b3bdc-1843-46fd-adc6-e697f613d9ac","Type":"ContainerStarted","Data":"2d8151a187f7c7178ec1e97158964f634228c9a35f67bf315d0630d7119fabda"} Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.018810 4940 generic.go:334] "Generic (PLEG): container finished" podID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerID="d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee" exitCode=0 Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.018964 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xkfvk" event={"ID":"9fd9ac3b-989c-49b5-b377-e63aef7fd979","Type":"ContainerDied","Data":"d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee"} Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.018986 4940 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/redhat-operators-xkfvk" event={"ID":"9fd9ac3b-989c-49b5-b377-e63aef7fd979","Type":"ContainerStarted","Data":"649f3221cba63e93c9375cf7796db9bbf1f2d279ec23be1b02e78878645df9d8"} Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.021549 4940 generic.go:334] "Generic (PLEG): container finished" podID="7b1f2008-3b1a-4665-8192-64c225577aea" containerID="c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87" exitCode=0 Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.022560 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p7sfk" event={"ID":"7b1f2008-3b1a-4665-8192-64c225577aea","Type":"ContainerDied","Data":"c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87"} Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.647175 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:08 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld Nov 26 06:57:08 crc kubenswrapper[4940]: [+]process-running ok Nov 26 06:57:08 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:08 crc kubenswrapper[4940]: I1126 06:57:08.647501 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:09 crc kubenswrapper[4940]: I1126 06:57:09.035619 4940 generic.go:334] "Generic (PLEG): container finished" podID="245d45a4-2775-4470-a35b-92b1c870fd35" containerID="94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78" exitCode=0 Nov 26 06:57:09 crc kubenswrapper[4940]: I1126 06:57:09.036209 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p856r" event={"ID":"245d45a4-2775-4470-a35b-92b1c870fd35","Type":"ContainerDied","Data":"94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78"} Nov 26 06:57:09 crc kubenswrapper[4940]: I1126 06:57:09.041537 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4d6b3bdc-1843-46fd-adc6-e697f613d9ac","Type":"ContainerStarted","Data":"857aa506efb27a8fe7325e8978f9210935d41c1108f9708f1f29c1f36b1235d4"} Nov 26 06:57:09 crc kubenswrapper[4940]: I1126 06:57:09.062743 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.062728239 podStartE2EDuration="2.062728239s" podCreationTimestamp="2025-11-26 06:57:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:09.062518462 +0000 UTC m=+130.582660081" watchObservedRunningTime="2025-11-26 06:57:09.062728239 +0000 UTC m=+130.582869858" Nov 26 06:57:09 crc kubenswrapper[4940]: I1126 06:57:09.644926 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:09 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld Nov 26 06:57:09 crc kubenswrapper[4940]: [+]process-running ok Nov 26 
06:57:09 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:09 crc kubenswrapper[4940]: I1126 06:57:09.644984 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:10 crc kubenswrapper[4940]: I1126 06:57:10.046791 4940 generic.go:334] "Generic (PLEG): container finished" podID="4d6b3bdc-1843-46fd-adc6-e697f613d9ac" containerID="857aa506efb27a8fe7325e8978f9210935d41c1108f9708f1f29c1f36b1235d4" exitCode=0 Nov 26 06:57:10 crc kubenswrapper[4940]: I1126 06:57:10.046836 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4d6b3bdc-1843-46fd-adc6-e697f613d9ac","Type":"ContainerDied","Data":"857aa506efb27a8fe7325e8978f9210935d41c1108f9708f1f29c1f36b1235d4"} Nov 26 06:57:10 crc kubenswrapper[4940]: I1126 06:57:10.645419 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:10 crc kubenswrapper[4940]: [-]has-synced failed: reason withheld Nov 26 06:57:10 crc kubenswrapper[4940]: [+]process-running ok Nov 26 06:57:10 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:10 crc kubenswrapper[4940]: I1126 06:57:10.645791 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:11 crc kubenswrapper[4940]: I1126 06:57:11.645194 4940 patch_prober.go:28] interesting pod/router-default-5444994796-42nbc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:57:11 crc kubenswrapper[4940]: [+]has-synced ok Nov 26 06:57:11 crc kubenswrapper[4940]: [+]process-running ok Nov 26 06:57:11 crc kubenswrapper[4940]: healthz check failed Nov 26 06:57:11 crc kubenswrapper[4940]: I1126 06:57:11.645276 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-42nbc" podUID="22919f3b-67d1-4b9a-a0fd-476ba56e9918" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:57:12 crc kubenswrapper[4940]: I1126 06:57:12.353288 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" Nov 26 06:57:12 crc kubenswrapper[4940]: I1126 06:57:12.645133 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:57:12 crc kubenswrapper[4940]: I1126 06:57:12.648322 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-42nbc" Nov 26 06:57:13 crc kubenswrapper[4940]: I1126 06:57:13.075570 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-24dgg" Nov 26 06:57:17 crc kubenswrapper[4940]: I1126 06:57:17.019287 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:57:17 crc 
kubenswrapper[4940]: I1126 06:57:17.023130 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-bflcx" Nov 26 06:57:17 crc kubenswrapper[4940]: I1126 06:57:17.930060 4940 patch_prober.go:28] interesting pod/downloads-7954f5f757-sb94j container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Nov 26 06:57:17 crc kubenswrapper[4940]: I1126 06:57:17.930118 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-sb94j" podUID="647d3086-00d3-476f-879f-dd922b1c1313" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" Nov 26 06:57:17 crc kubenswrapper[4940]: I1126 06:57:17.930175 4940 patch_prober.go:28] interesting pod/downloads-7954f5f757-sb94j container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Nov 26 06:57:17 crc kubenswrapper[4940]: I1126 06:57:17.930364 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-sb94j" podUID="647d3086-00d3-476f-879f-dd922b1c1313" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.38:8080/\": dial tcp 10.217.0.38:8080: connect: connection refused" Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.100370 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.177476 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4d6b3bdc-1843-46fd-adc6-e697f613d9ac","Type":"ContainerDied","Data":"2d8151a187f7c7178ec1e97158964f634228c9a35f67bf315d0630d7119fabda"} Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.177526 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d8151a187f7c7178ec1e97158964f634228c9a35f67bf315d0630d7119fabda" Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.177620 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.267388 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kubelet-dir\") pod \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\" (UID: \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\") " Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.267461 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kube-api-access\") pod \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\" (UID: \"4d6b3bdc-1843-46fd-adc6-e697f613d9ac\") " Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.267526 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4d6b3bdc-1843-46fd-adc6-e697f613d9ac" (UID: "4d6b3bdc-1843-46fd-adc6-e697f613d9ac"). 
InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.267726 4940 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.287220 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4d6b3bdc-1843-46fd-adc6-e697f613d9ac" (UID: "4d6b3bdc-1843-46fd-adc6-e697f613d9ac"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:57:18 crc kubenswrapper[4940]: I1126 06:57:18.368627 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4d6b3bdc-1843-46fd-adc6-e697f613d9ac-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:21 crc kubenswrapper[4940]: I1126 06:57:21.729136 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:57:21 crc kubenswrapper[4940]: I1126 06:57:21.729531 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:57:25 crc kubenswrapper[4940]: I1126 06:57:25.859328 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.100826 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.100875 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.100915 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.100941 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.103846 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.103899 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.105301 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.112335 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.116661 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.118159 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.125292 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.125591 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.288338 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.297858 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.409502 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:57:27 crc kubenswrapper[4940]: I1126 06:57:27.943351 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-sb94j" Nov 26 06:57:36 crc kubenswrapper[4940]: E1126 06:57:36.414102 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 06:57:36 crc kubenswrapper[4940]: E1126 06:57:36.414694 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qzgfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-n8sjf_openshift-marketplace(1927422c-3af6-418b-ba25-2cbecefd45ad): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 06:57:36 crc kubenswrapper[4940]: E1126 06:57:36.416114 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-n8sjf" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" Nov 26 06:57:37 crc kubenswrapper[4940]: I1126 06:57:37.671650 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dshqv" Nov 26 06:57:37 crc kubenswrapper[4940]: E1126 06:57:37.916000 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-n8sjf" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" Nov 26 06:57:38 crc 
kubenswrapper[4940]: E1126 06:57:38.024144 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.024935 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vhqlh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-c9xgx_openshift-marketplace(358bc8db-3143-435e-8d21-00fa78fa3029): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.027135 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-c9xgx" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.029904 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.030034 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mnrdq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-4n6pd_openshift-marketplace(5263e4cf-d56e-46cf-bc5b-dfcad517fb81): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.031517 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-4n6pd" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.070934 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.071311 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gz8bd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-vr7rb_openshift-marketplace(64de0c82-8255-4813-a615-e10e81aeede1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.072890 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vr7rb" podUID="64de0c82-8255-4813-a615-e10e81aeede1" Nov 26 06:57:38 crc kubenswrapper[4940]: I1126 06:57:38.308415 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p856r" event={"ID":"245d45a4-2775-4470-a35b-92b1c870fd35","Type":"ContainerStarted","Data":"6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7"} Nov 26 06:57:38 crc kubenswrapper[4940]: I1126 06:57:38.310695 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xkfvk" event={"ID":"9fd9ac3b-989c-49b5-b377-e63aef7fd979","Type":"ContainerStarted","Data":"ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0"} Nov 26 06:57:38 crc kubenswrapper[4940]: I1126 06:57:38.323959 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p7sfk" event={"ID":"7b1f2008-3b1a-4665-8192-64c225577aea","Type":"ContainerStarted","Data":"4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf"} Nov 26 06:57:38 crc kubenswrapper[4940]: I1126 06:57:38.349356 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nn64p" event={"ID":"965f184e-b9c1-4f5a-a51e-c5e36466002d","Type":"ContainerStarted","Data":"089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550"} Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.352863 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/certified-operators-c9xgx" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.353461 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-vr7rb" podUID="64de0c82-8255-4813-a615-e10e81aeede1" Nov 26 06:57:38 crc kubenswrapper[4940]: E1126 06:57:38.353567 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-4n6pd" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" Nov 26 06:57:38 crc kubenswrapper[4940]: W1126 06:57:38.482845 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-853c9beb1f7f6eeb92b5ce47ea7490ed960c61e1517c0338be7f988dcc29d5cf WatchSource:0}: Error finding container 853c9beb1f7f6eeb92b5ce47ea7490ed960c61e1517c0338be7f988dcc29d5cf: Status 404 returned error can't find the container with id 853c9beb1f7f6eeb92b5ce47ea7490ed960c61e1517c0338be7f988dcc29d5cf Nov 26 06:57:38 crc kubenswrapper[4940]: W1126 06:57:38.543319 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-beff5728b3cf761b81a36f319e00523a94a55a6766c19b945511bf58337304b0 WatchSource:0}: Error finding container beff5728b3cf761b81a36f319e00523a94a55a6766c19b945511bf58337304b0: Status 404 returned error can't find the container with id beff5728b3cf761b81a36f319e00523a94a55a6766c19b945511bf58337304b0 Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.356712 4940 generic.go:334] "Generic (PLEG): container finished" podID="7b1f2008-3b1a-4665-8192-64c225577aea" containerID="4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf" exitCode=0 Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.356880 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p7sfk" event={"ID":"7b1f2008-3b1a-4665-8192-64c225577aea","Type":"ContainerDied","Data":"4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.359826 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"66270167aae32a7a79c302b155e6dd862e06b7ccbeaf2b6c12058f57dec9d869"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.359852 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"1f50fe1f58fbabb59501d0fe30038b37491ce9d14f40e34ed93d73e884cb58d5"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.360259 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.362511 4940 generic.go:334] "Generic (PLEG): container finished" 
podID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerID="089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550" exitCode=0 Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.362552 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nn64p" event={"ID":"965f184e-b9c1-4f5a-a51e-c5e36466002d","Type":"ContainerDied","Data":"089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.362567 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nn64p" event={"ID":"965f184e-b9c1-4f5a-a51e-c5e36466002d","Type":"ContainerStarted","Data":"edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.363858 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"19211930c77bfe17802e7bd7d9dd603eeb03e660b516ad1343284a1782f45a2f"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.363890 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"853c9beb1f7f6eeb92b5ce47ea7490ed960c61e1517c0338be7f988dcc29d5cf"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.365331 4940 generic.go:334] "Generic (PLEG): container finished" podID="245d45a4-2775-4470-a35b-92b1c870fd35" containerID="6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7" exitCode=0 Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.365361 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p856r" event={"ID":"245d45a4-2775-4470-a35b-92b1c870fd35","Type":"ContainerDied","Data":"6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.366528 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"d5d86cb10732421048d53311780dfd8273e50693b913f546d2eea3ee7c500356"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.366564 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"beff5728b3cf761b81a36f319e00523a94a55a6766c19b945511bf58337304b0"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.370411 4940 generic.go:334] "Generic (PLEG): container finished" podID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerID="ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0" exitCode=0 Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.370448 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xkfvk" event={"ID":"9fd9ac3b-989c-49b5-b377-e63aef7fd979","Type":"ContainerDied","Data":"ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0"} Nov 26 06:57:39 crc kubenswrapper[4940]: I1126 06:57:39.390120 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nn64p" podStartSLOduration=2.5962100599999998 
podStartE2EDuration="34.390106092s" podCreationTimestamp="2025-11-26 06:57:05 +0000 UTC" firstStartedPulling="2025-11-26 06:57:07.013477945 +0000 UTC m=+128.533619564" lastFinishedPulling="2025-11-26 06:57:38.807373957 +0000 UTC m=+160.327515596" observedRunningTime="2025-11-26 06:57:39.387702816 +0000 UTC m=+160.907844455" watchObservedRunningTime="2025-11-26 06:57:39.390106092 +0000 UTC m=+160.910247711" Nov 26 06:57:40 crc kubenswrapper[4940]: I1126 06:57:40.379683 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xkfvk" event={"ID":"9fd9ac3b-989c-49b5-b377-e63aef7fd979","Type":"ContainerStarted","Data":"a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5"} Nov 26 06:57:40 crc kubenswrapper[4940]: I1126 06:57:40.381844 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p7sfk" event={"ID":"7b1f2008-3b1a-4665-8192-64c225577aea","Type":"ContainerStarted","Data":"cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f"} Nov 26 06:57:40 crc kubenswrapper[4940]: I1126 06:57:40.385140 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p856r" event={"ID":"245d45a4-2775-4470-a35b-92b1c870fd35","Type":"ContainerStarted","Data":"3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e"} Nov 26 06:57:40 crc kubenswrapper[4940]: I1126 06:57:40.400108 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xkfvk" podStartSLOduration=2.552724723 podStartE2EDuration="34.400061618s" podCreationTimestamp="2025-11-26 06:57:06 +0000 UTC" firstStartedPulling="2025-11-26 06:57:08.020163026 +0000 UTC m=+129.540304635" lastFinishedPulling="2025-11-26 06:57:39.867499911 +0000 UTC m=+161.387641530" observedRunningTime="2025-11-26 06:57:40.399488979 +0000 UTC m=+161.919630598" watchObservedRunningTime="2025-11-26 06:57:40.400061618 +0000 UTC m=+161.920203237" Nov 26 06:57:40 crc kubenswrapper[4940]: I1126 06:57:40.443824 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p7sfk" podStartSLOduration=3.640417147 podStartE2EDuration="35.443808422s" podCreationTimestamp="2025-11-26 06:57:05 +0000 UTC" firstStartedPulling="2025-11-26 06:57:08.023347266 +0000 UTC m=+129.543488885" lastFinishedPulling="2025-11-26 06:57:39.826738531 +0000 UTC m=+161.346880160" observedRunningTime="2025-11-26 06:57:40.414821048 +0000 UTC m=+161.934962667" watchObservedRunningTime="2025-11-26 06:57:40.443808422 +0000 UTC m=+161.963950041" Nov 26 06:57:40 crc kubenswrapper[4940]: I1126 06:57:40.444768 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p856r" podStartSLOduration=3.682339591 podStartE2EDuration="34.444764303s" podCreationTimestamp="2025-11-26 06:57:06 +0000 UTC" firstStartedPulling="2025-11-26 06:57:09.037847306 +0000 UTC m=+130.557988925" lastFinishedPulling="2025-11-26 06:57:39.800272018 +0000 UTC m=+161.320413637" observedRunningTime="2025-11-26 06:57:40.442799429 +0000 UTC m=+161.962941068" watchObservedRunningTime="2025-11-26 06:57:40.444764303 +0000 UTC m=+161.964905912" Nov 26 06:57:41 crc kubenswrapper[4940]: I1126 06:57:41.510294 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" 
(UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:57:41 crc kubenswrapper[4940]: I1126 06:57:41.511905 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 06:57:41 crc kubenswrapper[4940]: I1126 06:57:41.525301 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b1a21dbc-3d52-4bc2-805b-65dc954babce-metrics-certs\") pod \"network-metrics-daemon-nfh6j\" (UID: \"b1a21dbc-3d52-4bc2-805b-65dc954babce\") " pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:57:41 crc kubenswrapper[4940]: I1126 06:57:41.795481 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 06:57:41 crc kubenswrapper[4940]: I1126 06:57:41.804511 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nfh6j" Nov 26 06:57:42 crc kubenswrapper[4940]: I1126 06:57:42.208641 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-nfh6j"] Nov 26 06:57:42 crc kubenswrapper[4940]: I1126 06:57:42.395742 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" event={"ID":"b1a21dbc-3d52-4bc2-805b-65dc954babce","Type":"ContainerStarted","Data":"14a46ba29fd5218fa1e41d4742f96201a96092866f19c6a42f3a026d304fb432"} Nov 26 06:57:43 crc kubenswrapper[4940]: I1126 06:57:43.403317 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" event={"ID":"b1a21dbc-3d52-4bc2-805b-65dc954babce","Type":"ContainerStarted","Data":"e12f6a16a5f97ac6c5933d5718e148863ae4e671c0bddde645d485d10cca038e"} Nov 26 06:57:43 crc kubenswrapper[4940]: I1126 06:57:43.403531 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nfh6j" event={"ID":"b1a21dbc-3d52-4bc2-805b-65dc954babce","Type":"ContainerStarted","Data":"5249a5630b5e1653dc36efa0661d05b673b183fd4d87b94d727ac45f6d5afe3e"} Nov 26 06:57:43 crc kubenswrapper[4940]: I1126 06:57:43.420473 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-nfh6j" podStartSLOduration=144.420456639 podStartE2EDuration="2m24.420456639s" podCreationTimestamp="2025-11-26 06:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:57:43.417004159 +0000 UTC m=+164.937145778" watchObservedRunningTime="2025-11-26 06:57:43.420456639 +0000 UTC m=+164.940598258" Nov 26 06:57:45 crc kubenswrapper[4940]: I1126 06:57:45.896133 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nn64p" Nov 26 06:57:45 crc kubenswrapper[4940]: I1126 06:57:45.897053 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nn64p" Nov 26 06:57:46 crc kubenswrapper[4940]: I1126 06:57:46.050372 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nn64p" Nov 26 06:57:46 crc kubenswrapper[4940]: I1126 06:57:46.323159 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p7sfk" Nov 26 06:57:46 crc kubenswrapper[4940]: I1126 
06:57:46.323218 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p7sfk" Nov 26 06:57:46 crc kubenswrapper[4940]: I1126 06:57:46.375538 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p7sfk" Nov 26 06:57:46 crc kubenswrapper[4940]: I1126 06:57:46.455795 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nn64p" Nov 26 06:57:46 crc kubenswrapper[4940]: I1126 06:57:46.455857 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p7sfk" Nov 26 06:57:46 crc kubenswrapper[4940]: I1126 06:57:46.960911 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xkfvk" Nov 26 06:57:46 crc kubenswrapper[4940]: I1126 06:57:46.960970 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xkfvk" Nov 26 06:57:47 crc kubenswrapper[4940]: I1126 06:57:47.024134 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xkfvk" Nov 26 06:57:47 crc kubenswrapper[4940]: I1126 06:57:47.357941 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p856r" Nov 26 06:57:47 crc kubenswrapper[4940]: I1126 06:57:47.357982 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p856r" Nov 26 06:57:47 crc kubenswrapper[4940]: I1126 06:57:47.413177 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p856r" Nov 26 06:57:47 crc kubenswrapper[4940]: I1126 06:57:47.469369 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p856r" Nov 26 06:57:47 crc kubenswrapper[4940]: I1126 06:57:47.469609 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xkfvk" Nov 26 06:57:47 crc kubenswrapper[4940]: I1126 06:57:47.482191 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p7sfk"] Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.235359 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2kv29"] Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.427714 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p7sfk" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" containerName="registry-server" containerID="cri-o://cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f" gracePeriod=2 Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.796164 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p7sfk" Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.906262 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-catalog-content\") pod \"7b1f2008-3b1a-4665-8192-64c225577aea\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.906415 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjjhd\" (UniqueName: \"kubernetes.io/projected/7b1f2008-3b1a-4665-8192-64c225577aea-kube-api-access-gjjhd\") pod \"7b1f2008-3b1a-4665-8192-64c225577aea\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.906447 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-utilities\") pod \"7b1f2008-3b1a-4665-8192-64c225577aea\" (UID: \"7b1f2008-3b1a-4665-8192-64c225577aea\") " Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.907677 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-utilities" (OuterVolumeSpecName: "utilities") pod "7b1f2008-3b1a-4665-8192-64c225577aea" (UID: "7b1f2008-3b1a-4665-8192-64c225577aea"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.907885 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.913846 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b1f2008-3b1a-4665-8192-64c225577aea-kube-api-access-gjjhd" (OuterVolumeSpecName: "kube-api-access-gjjhd") pod "7b1f2008-3b1a-4665-8192-64c225577aea" (UID: "7b1f2008-3b1a-4665-8192-64c225577aea"). InnerVolumeSpecName "kube-api-access-gjjhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:57:48 crc kubenswrapper[4940]: I1126 06:57:48.932336 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7b1f2008-3b1a-4665-8192-64c225577aea" (UID: "7b1f2008-3b1a-4665-8192-64c225577aea"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.009191 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b1f2008-3b1a-4665-8192-64c225577aea-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.009228 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjjhd\" (UniqueName: \"kubernetes.io/projected/7b1f2008-3b1a-4665-8192-64c225577aea-kube-api-access-gjjhd\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.281826 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p856r"] Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.452708 4940 generic.go:334] "Generic (PLEG): container finished" podID="7b1f2008-3b1a-4665-8192-64c225577aea" containerID="cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f" exitCode=0 Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.453001 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p7sfk" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.453092 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p7sfk" event={"ID":"7b1f2008-3b1a-4665-8192-64c225577aea","Type":"ContainerDied","Data":"cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f"} Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.453121 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p856r" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" containerName="registry-server" containerID="cri-o://3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e" gracePeriod=2 Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.453139 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p7sfk" event={"ID":"7b1f2008-3b1a-4665-8192-64c225577aea","Type":"ContainerDied","Data":"4bad84f7c923d3acc366a0df54344e2581f42f1b8f66ca9aede2f87df681eb74"} Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.453348 4940 scope.go:117] "RemoveContainer" containerID="cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.474373 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p7sfk"] Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.479326 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p7sfk"] Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.488732 4940 scope.go:117] "RemoveContainer" containerID="4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.505394 4940 scope.go:117] "RemoveContainer" containerID="c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.521070 4940 scope.go:117] "RemoveContainer" containerID="cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f" Nov 26 06:57:49 crc kubenswrapper[4940]: E1126 06:57:49.521528 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f\": container with ID starting with cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f not found: ID does not exist" containerID="cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.521560 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f"} err="failed to get container status \"cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f\": rpc error: code = NotFound desc = could not find container \"cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f\": container with ID starting with cfe68764cccc4f9c6fe5d970fa0b27f65ceaf7f95121e1fe8b7058bae452f66f not found: ID does not exist" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.521601 4940 scope.go:117] "RemoveContainer" containerID="4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf" Nov 26 06:57:49 crc kubenswrapper[4940]: E1126 06:57:49.521772 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf\": container with ID starting with 4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf not found: ID does not exist" containerID="4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.521791 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf"} err="failed to get container status \"4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf\": rpc error: code = NotFound desc = could not find container \"4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf\": container with ID starting with 4d6ed60e18953234d2bd3c275f573d5fa152fb24c53a26e99ffd7ffcd9289acf not found: ID does not exist" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.521804 4940 scope.go:117] "RemoveContainer" containerID="c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87" Nov 26 06:57:49 crc kubenswrapper[4940]: E1126 06:57:49.521963 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87\": container with ID starting with c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87 not found: ID does not exist" containerID="c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87" Nov 26 06:57:49 crc kubenswrapper[4940]: I1126 06:57:49.521980 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87"} err="failed to get container status \"c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87\": rpc error: code = NotFound desc = could not find container \"c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87\": container with ID starting with c497e0e5c508bcae69ca7aba1438b9f6777874e911df7c351c75658ca8782a87 not found: ID does not exist" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.406874 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p856r" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.461276 4940 generic.go:334] "Generic (PLEG): container finished" podID="245d45a4-2775-4470-a35b-92b1c870fd35" containerID="3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e" exitCode=0 Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.461344 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p856r" event={"ID":"245d45a4-2775-4470-a35b-92b1c870fd35","Type":"ContainerDied","Data":"3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e"} Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.461499 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p856r" event={"ID":"245d45a4-2775-4470-a35b-92b1c870fd35","Type":"ContainerDied","Data":"7dc5e0f17e386a0adbf999c801d38ba18c548ec3b291e4418bb3fbe59b9323b5"} Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.461520 4940 scope.go:117] "RemoveContainer" containerID="3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.461371 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p856r" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.476317 4940 scope.go:117] "RemoveContainer" containerID="6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.490487 4940 scope.go:117] "RemoveContainer" containerID="94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.509873 4940 scope.go:117] "RemoveContainer" containerID="3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e" Nov 26 06:57:50 crc kubenswrapper[4940]: E1126 06:57:50.510340 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e\": container with ID starting with 3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e not found: ID does not exist" containerID="3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.510382 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e"} err="failed to get container status \"3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e\": rpc error: code = NotFound desc = could not find container \"3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e\": container with ID starting with 3deef4a637f97f4bc32824a740ce023f1417fb8be7470e8a6f9e9224c26e336e not found: ID does not exist" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.510410 4940 scope.go:117] "RemoveContainer" containerID="6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7" Nov 26 06:57:50 crc kubenswrapper[4940]: E1126 06:57:50.510744 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7\": container with ID starting with 6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7 not found: ID does not exist" 
containerID="6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.510784 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7"} err="failed to get container status \"6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7\": rpc error: code = NotFound desc = could not find container \"6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7\": container with ID starting with 6b9e89e121c1a5933f114734e928d16b3f14a63217e6c4b5f12a6f93b9f9e1e7 not found: ID does not exist" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.510809 4940 scope.go:117] "RemoveContainer" containerID="94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78" Nov 26 06:57:50 crc kubenswrapper[4940]: E1126 06:57:50.511117 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78\": container with ID starting with 94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78 not found: ID does not exist" containerID="94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.511145 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78"} err="failed to get container status \"94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78\": rpc error: code = NotFound desc = could not find container \"94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78\": container with ID starting with 94de4d9aba2a9d6d6b93751ae001a0087480ecd987f4586563aa66d50a9aef78 not found: ID does not exist" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.525737 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-utilities\") pod \"245d45a4-2775-4470-a35b-92b1c870fd35\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.525788 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bb2l8\" (UniqueName: \"kubernetes.io/projected/245d45a4-2775-4470-a35b-92b1c870fd35-kube-api-access-bb2l8\") pod \"245d45a4-2775-4470-a35b-92b1c870fd35\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.525809 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-catalog-content\") pod \"245d45a4-2775-4470-a35b-92b1c870fd35\" (UID: \"245d45a4-2775-4470-a35b-92b1c870fd35\") " Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.527229 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-utilities" (OuterVolumeSpecName: "utilities") pod "245d45a4-2775-4470-a35b-92b1c870fd35" (UID: "245d45a4-2775-4470-a35b-92b1c870fd35"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.530786 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/245d45a4-2775-4470-a35b-92b1c870fd35-kube-api-access-bb2l8" (OuterVolumeSpecName: "kube-api-access-bb2l8") pod "245d45a4-2775-4470-a35b-92b1c870fd35" (UID: "245d45a4-2775-4470-a35b-92b1c870fd35"). InnerVolumeSpecName "kube-api-access-bb2l8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.617408 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "245d45a4-2775-4470-a35b-92b1c870fd35" (UID: "245d45a4-2775-4470-a35b-92b1c870fd35"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.626802 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.626829 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/245d45a4-2775-4470-a35b-92b1c870fd35-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.626841 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bb2l8\" (UniqueName: \"kubernetes.io/projected/245d45a4-2775-4470-a35b-92b1c870fd35-kube-api-access-bb2l8\") on node \"crc\" DevicePath \"\"" Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.814961 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p856r"] Nov 26 06:57:50 crc kubenswrapper[4940]: I1126 06:57:50.818047 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p856r"] Nov 26 06:57:51 crc kubenswrapper[4940]: I1126 06:57:51.172984 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" path="/var/lib/kubelet/pods/245d45a4-2775-4470-a35b-92b1c870fd35/volumes" Nov 26 06:57:51 crc kubenswrapper[4940]: I1126 06:57:51.174211 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" path="/var/lib/kubelet/pods/7b1f2008-3b1a-4665-8192-64c225577aea/volumes" Nov 26 06:57:51 crc kubenswrapper[4940]: I1126 06:57:51.466762 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n6pd" event={"ID":"5263e4cf-d56e-46cf-bc5b-dfcad517fb81","Type":"ContainerStarted","Data":"76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771"} Nov 26 06:57:51 crc kubenswrapper[4940]: I1126 06:57:51.469912 4940 generic.go:334] "Generic (PLEG): container finished" podID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerID="3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3" exitCode=0 Nov 26 06:57:51 crc kubenswrapper[4940]: I1126 06:57:51.469988 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n8sjf" event={"ID":"1927422c-3af6-418b-ba25-2cbecefd45ad","Type":"ContainerDied","Data":"3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3"} Nov 26 06:57:51 crc 
kubenswrapper[4940]: I1126 06:57:51.728107 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:57:51 crc kubenswrapper[4940]: I1126 06:57:51.728344 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:57:52 crc kubenswrapper[4940]: I1126 06:57:52.477576 4940 generic.go:334] "Generic (PLEG): container finished" podID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerID="76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771" exitCode=0 Nov 26 06:57:52 crc kubenswrapper[4940]: I1126 06:57:52.477631 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n6pd" event={"ID":"5263e4cf-d56e-46cf-bc5b-dfcad517fb81","Type":"ContainerDied","Data":"76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771"} Nov 26 06:57:52 crc kubenswrapper[4940]: I1126 06:57:52.485349 4940 generic.go:334] "Generic (PLEG): container finished" podID="358bc8db-3143-435e-8d21-00fa78fa3029" containerID="2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90" exitCode=0 Nov 26 06:57:52 crc kubenswrapper[4940]: I1126 06:57:52.485417 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9xgx" event={"ID":"358bc8db-3143-435e-8d21-00fa78fa3029","Type":"ContainerDied","Data":"2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90"} Nov 26 06:57:52 crc kubenswrapper[4940]: I1126 06:57:52.487475 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n8sjf" event={"ID":"1927422c-3af6-418b-ba25-2cbecefd45ad","Type":"ContainerStarted","Data":"b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6"} Nov 26 06:57:52 crc kubenswrapper[4940]: I1126 06:57:52.523564 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n8sjf" podStartSLOduration=3.5774903670000002 podStartE2EDuration="49.52355027s" podCreationTimestamp="2025-11-26 06:57:03 +0000 UTC" firstStartedPulling="2025-11-26 06:57:05.928396986 +0000 UTC m=+127.448538605" lastFinishedPulling="2025-11-26 06:57:51.874456889 +0000 UTC m=+173.394598508" observedRunningTime="2025-11-26 06:57:52.523215719 +0000 UTC m=+174.043357338" watchObservedRunningTime="2025-11-26 06:57:52.52355027 +0000 UTC m=+174.043691889" Nov 26 06:57:53 crc kubenswrapper[4940]: I1126 06:57:53.493763 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n6pd" event={"ID":"5263e4cf-d56e-46cf-bc5b-dfcad517fb81","Type":"ContainerStarted","Data":"7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896"} Nov 26 06:57:53 crc kubenswrapper[4940]: I1126 06:57:53.496647 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9xgx" event={"ID":"358bc8db-3143-435e-8d21-00fa78fa3029","Type":"ContainerStarted","Data":"ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc"} Nov 26 06:57:53 crc kubenswrapper[4940]: I1126 06:57:53.511357 
4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4n6pd" podStartSLOduration=3.524361314 podStartE2EDuration="50.511339908s" podCreationTimestamp="2025-11-26 06:57:03 +0000 UTC" firstStartedPulling="2025-11-26 06:57:05.919268635 +0000 UTC m=+127.439410244" lastFinishedPulling="2025-11-26 06:57:52.906247219 +0000 UTC m=+174.426388838" observedRunningTime="2025-11-26 06:57:53.508769546 +0000 UTC m=+175.028911165" watchObservedRunningTime="2025-11-26 06:57:53.511339908 +0000 UTC m=+175.031481517" Nov 26 06:57:53 crc kubenswrapper[4940]: I1126 06:57:53.529053 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c9xgx" podStartSLOduration=3.5697411 podStartE2EDuration="50.529005201s" podCreationTimestamp="2025-11-26 06:57:03 +0000 UTC" firstStartedPulling="2025-11-26 06:57:05.925313308 +0000 UTC m=+127.445454927" lastFinishedPulling="2025-11-26 06:57:52.884577409 +0000 UTC m=+174.404719028" observedRunningTime="2025-11-26 06:57:53.527167823 +0000 UTC m=+175.047309432" watchObservedRunningTime="2025-11-26 06:57:53.529005201 +0000 UTC m=+175.049146820" Nov 26 06:57:53 crc kubenswrapper[4940]: I1126 06:57:53.783214 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:53 crc kubenswrapper[4940]: I1126 06:57:53.783271 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:57:53 crc kubenswrapper[4940]: I1126 06:57:53.982447 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:53 crc kubenswrapper[4940]: I1126 06:57:53.982504 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:54 crc kubenswrapper[4940]: I1126 06:57:54.021474 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:57:54 crc kubenswrapper[4940]: I1126 06:57:54.450264 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:54 crc kubenswrapper[4940]: I1126 06:57:54.450316 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:54 crc kubenswrapper[4940]: I1126 06:57:54.483360 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:57:54 crc kubenswrapper[4940]: I1126 06:57:54.819476 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-c9xgx" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" containerName="registry-server" probeResult="failure" output=< Nov 26 06:57:54 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 06:57:54 crc kubenswrapper[4940]: > Nov 26 06:57:55 crc kubenswrapper[4940]: I1126 06:57:55.509693 4940 generic.go:334] "Generic (PLEG): container finished" podID="64de0c82-8255-4813-a615-e10e81aeede1" containerID="01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb" exitCode=0 Nov 26 06:57:55 crc kubenswrapper[4940]: I1126 06:57:55.509905 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-vr7rb" event={"ID":"64de0c82-8255-4813-a615-e10e81aeede1","Type":"ContainerDied","Data":"01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb"} Nov 26 06:57:56 crc kubenswrapper[4940]: I1126 06:57:56.517462 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vr7rb" event={"ID":"64de0c82-8255-4813-a615-e10e81aeede1","Type":"ContainerStarted","Data":"787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486"} Nov 26 06:57:56 crc kubenswrapper[4940]: I1126 06:57:56.536149 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vr7rb" podStartSLOduration=3.438663511 podStartE2EDuration="53.53613154s" podCreationTimestamp="2025-11-26 06:57:03 +0000 UTC" firstStartedPulling="2025-11-26 06:57:05.931137423 +0000 UTC m=+127.451279042" lastFinishedPulling="2025-11-26 06:57:56.028605452 +0000 UTC m=+177.548747071" observedRunningTime="2025-11-26 06:57:56.533972612 +0000 UTC m=+178.054114221" watchObservedRunningTime="2025-11-26 06:57:56.53613154 +0000 UTC m=+178.056273159" Nov 26 06:58:03 crc kubenswrapper[4940]: I1126 06:58:03.832202 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:58:03 crc kubenswrapper[4940]: I1126 06:58:03.876454 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:58:04 crc kubenswrapper[4940]: I1126 06:58:04.054303 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:58:04 crc kubenswrapper[4940]: I1126 06:58:04.401997 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:58:04 crc kubenswrapper[4940]: I1126 06:58:04.402372 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:58:04 crc kubenswrapper[4940]: I1126 06:58:04.476920 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:58:04 crc kubenswrapper[4940]: I1126 06:58:04.516590 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:58:04 crc kubenswrapper[4940]: I1126 06:58:04.624174 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.072084 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4n6pd"] Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.072348 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4n6pd" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerName="registry-server" containerID="cri-o://7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896" gracePeriod=2 Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.273612 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vr7rb"] Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.532897 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.576708 4940 generic.go:334] "Generic (PLEG): container finished" podID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerID="7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896" exitCode=0 Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.576809 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n6pd" event={"ID":"5263e4cf-d56e-46cf-bc5b-dfcad517fb81","Type":"ContainerDied","Data":"7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896"} Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.576853 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4n6pd" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.576884 4940 scope.go:117] "RemoveContainer" containerID="7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.576867 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n6pd" event={"ID":"5263e4cf-d56e-46cf-bc5b-dfcad517fb81","Type":"ContainerDied","Data":"35d937d96e7a6bd00fd17c3c6314fabb6378686895334e833399e3d3dc1414e4"} Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.601133 4940 scope.go:117] "RemoveContainer" containerID="76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.622253 4940 scope.go:117] "RemoveContainer" containerID="ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.636485 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-utilities\") pod \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.636627 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-catalog-content\") pod \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.636720 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrdq\" (UniqueName: \"kubernetes.io/projected/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-kube-api-access-mnrdq\") pod \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\" (UID: \"5263e4cf-d56e-46cf-bc5b-dfcad517fb81\") " Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.638249 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-utilities" (OuterVolumeSpecName: "utilities") pod "5263e4cf-d56e-46cf-bc5b-dfcad517fb81" (UID: "5263e4cf-d56e-46cf-bc5b-dfcad517fb81"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.643258 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-kube-api-access-mnrdq" (OuterVolumeSpecName: "kube-api-access-mnrdq") pod "5263e4cf-d56e-46cf-bc5b-dfcad517fb81" (UID: "5263e4cf-d56e-46cf-bc5b-dfcad517fb81"). InnerVolumeSpecName "kube-api-access-mnrdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.645433 4940 scope.go:117] "RemoveContainer" containerID="7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896" Nov 26 06:58:06 crc kubenswrapper[4940]: E1126 06:58:06.646175 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896\": container with ID starting with 7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896 not found: ID does not exist" containerID="7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.646220 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896"} err="failed to get container status \"7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896\": rpc error: code = NotFound desc = could not find container \"7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896\": container with ID starting with 7f93bc3a93068b0b829bd3e6c69515af0c9eaf051ca45e63de3a4b3798cd4896 not found: ID does not exist" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.646278 4940 scope.go:117] "RemoveContainer" containerID="76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771" Nov 26 06:58:06 crc kubenswrapper[4940]: E1126 06:58:06.646724 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771\": container with ID starting with 76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771 not found: ID does not exist" containerID="76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.646768 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771"} err="failed to get container status \"76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771\": rpc error: code = NotFound desc = could not find container \"76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771\": container with ID starting with 76fe8836e54bfb79b22b84c02c251650c05bdb0bd1cb32f5193ddf8ff44ab771 not found: ID does not exist" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.646788 4940 scope.go:117] "RemoveContainer" containerID="ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4" Nov 26 06:58:06 crc kubenswrapper[4940]: E1126 06:58:06.647286 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4\": container with ID starting with ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4 not found: ID does not 
exist" containerID="ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.647326 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4"} err="failed to get container status \"ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4\": rpc error: code = NotFound desc = could not find container \"ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4\": container with ID starting with ff4d2adf2dc06cfa3265ff8150e1216d22e081af74bf6a831aa882b8a60aafe4 not found: ID does not exist" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.697262 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5263e4cf-d56e-46cf-bc5b-dfcad517fb81" (UID: "5263e4cf-d56e-46cf-bc5b-dfcad517fb81"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.738449 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrdq\" (UniqueName: \"kubernetes.io/projected/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-kube-api-access-mnrdq\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.738486 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.738500 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5263e4cf-d56e-46cf-bc5b-dfcad517fb81-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.924495 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4n6pd"] Nov 26 06:58:06 crc kubenswrapper[4940]: I1126 06:58:06.930363 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4n6pd"] Nov 26 06:58:07 crc kubenswrapper[4940]: I1126 06:58:07.173437 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" path="/var/lib/kubelet/pods/5263e4cf-d56e-46cf-bc5b-dfcad517fb81/volumes" Nov 26 06:58:07 crc kubenswrapper[4940]: I1126 06:58:07.582300 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vr7rb" podUID="64de0c82-8255-4813-a615-e10e81aeede1" containerName="registry-server" containerID="cri-o://787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486" gracePeriod=2 Nov 26 06:58:07 crc kubenswrapper[4940]: I1126 06:58:07.964915 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.058358 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-utilities\") pod \"64de0c82-8255-4813-a615-e10e81aeede1\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.058516 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-catalog-content\") pod \"64de0c82-8255-4813-a615-e10e81aeede1\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.058591 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz8bd\" (UniqueName: \"kubernetes.io/projected/64de0c82-8255-4813-a615-e10e81aeede1-kube-api-access-gz8bd\") pod \"64de0c82-8255-4813-a615-e10e81aeede1\" (UID: \"64de0c82-8255-4813-a615-e10e81aeede1\") " Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.059324 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-utilities" (OuterVolumeSpecName: "utilities") pod "64de0c82-8255-4813-a615-e10e81aeede1" (UID: "64de0c82-8255-4813-a615-e10e81aeede1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.074785 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64de0c82-8255-4813-a615-e10e81aeede1-kube-api-access-gz8bd" (OuterVolumeSpecName: "kube-api-access-gz8bd") pod "64de0c82-8255-4813-a615-e10e81aeede1" (UID: "64de0c82-8255-4813-a615-e10e81aeede1"). InnerVolumeSpecName "kube-api-access-gz8bd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.109238 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64de0c82-8255-4813-a615-e10e81aeede1" (UID: "64de0c82-8255-4813-a615-e10e81aeede1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.160765 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.160819 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz8bd\" (UniqueName: \"kubernetes.io/projected/64de0c82-8255-4813-a615-e10e81aeede1-kube-api-access-gz8bd\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.160839 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64de0c82-8255-4813-a615-e10e81aeede1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.593979 4940 generic.go:334] "Generic (PLEG): container finished" podID="64de0c82-8255-4813-a615-e10e81aeede1" containerID="787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486" exitCode=0 Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.594025 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vr7rb" event={"ID":"64de0c82-8255-4813-a615-e10e81aeede1","Type":"ContainerDied","Data":"787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486"} Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.594108 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vr7rb" event={"ID":"64de0c82-8255-4813-a615-e10e81aeede1","Type":"ContainerDied","Data":"90c1591f5cd9e9b968ff9f7bb5367283ec5428836d9a3da14ab6f41914b8393f"} Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.594130 4940 scope.go:117] "RemoveContainer" containerID="787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.595667 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vr7rb" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.617837 4940 scope.go:117] "RemoveContainer" containerID="01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.651083 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vr7rb"] Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.654233 4940 scope.go:117] "RemoveContainer" containerID="d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.655790 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vr7rb"] Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.679289 4940 scope.go:117] "RemoveContainer" containerID="787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486" Nov 26 06:58:08 crc kubenswrapper[4940]: E1126 06:58:08.679867 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486\": container with ID starting with 787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486 not found: ID does not exist" containerID="787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.680003 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486"} err="failed to get container status \"787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486\": rpc error: code = NotFound desc = could not find container \"787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486\": container with ID starting with 787184118def96f60806d780cc2d70428d8b4e3ad12506331a6edb6d549c7486 not found: ID does not exist" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.680141 4940 scope.go:117] "RemoveContainer" containerID="01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb" Nov 26 06:58:08 crc kubenswrapper[4940]: E1126 06:58:08.680569 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb\": container with ID starting with 01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb not found: ID does not exist" containerID="01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.680596 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb"} err="failed to get container status \"01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb\": rpc error: code = NotFound desc = could not find container \"01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb\": container with ID starting with 01e1f20e9d1634fd76ca1bb26072848b1a879770df8be4325bff6592a7ef23bb not found: ID does not exist" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.680616 4940 scope.go:117] "RemoveContainer" containerID="d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3" Nov 26 06:58:08 crc kubenswrapper[4940]: E1126 06:58:08.680965 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3\": container with ID starting with d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3 not found: ID does not exist" containerID="d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3" Nov 26 06:58:08 crc kubenswrapper[4940]: I1126 06:58:08.681010 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3"} err="failed to get container status \"d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3\": rpc error: code = NotFound desc = could not find container \"d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3\": container with ID starting with d97b64dad7e9b82cf33c88bec9f2a2fad9123b91c5c066cfc30030037cbd64f3 not found: ID does not exist" Nov 26 06:58:09 crc kubenswrapper[4940]: I1126 06:58:09.175888 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64de0c82-8255-4813-a615-e10e81aeede1" path="/var/lib/kubelet/pods/64de0c82-8255-4813-a615-e10e81aeede1/volumes" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.259838 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" podUID="7bd56d38-cc74-420e-ab79-f16c8d36638f" containerName="oauth-openshift" containerID="cri-o://477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c" gracePeriod=15 Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.618560 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.629387 4940 generic.go:334] "Generic (PLEG): container finished" podID="7bd56d38-cc74-420e-ab79-f16c8d36638f" containerID="477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c" exitCode=0 Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.629452 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" event={"ID":"7bd56d38-cc74-420e-ab79-f16c8d36638f","Type":"ContainerDied","Data":"477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c"} Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.629481 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.629511 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2kv29" event={"ID":"7bd56d38-cc74-420e-ab79-f16c8d36638f","Type":"ContainerDied","Data":"0e1a88906edb02d7681ffae619b3c58c5b33cee30a1486271d784132ff2ac4ea"} Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.629536 4940 scope.go:117] "RemoveContainer" containerID="477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.654590 4940 scope.go:117] "RemoveContainer" containerID="477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c" Nov 26 06:58:13 crc kubenswrapper[4940]: E1126 06:58:13.654924 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c\": container with ID starting with 477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c not found: ID does not exist" containerID="477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.654962 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c"} err="failed to get container status \"477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c\": rpc error: code = NotFound desc = could not find container \"477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c\": container with ID starting with 477b6c44bc1d85f60bae7d703a10f04fd4b7cf6073025fadf192cd177fd6ab8c not found: ID does not exist" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736245 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-policies\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736314 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-idp-0-file-data\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736353 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-router-certs\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736406 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-login\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736440 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-session\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736467 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-trusted-ca-bundle\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736497 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-service-ca\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736531 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-serving-cert\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736555 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-dir\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736577 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7w7fd\" (UniqueName: \"kubernetes.io/projected/7bd56d38-cc74-420e-ab79-f16c8d36638f-kube-api-access-7w7fd\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736612 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-provider-selection\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736634 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-cliconfig\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736674 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-ocp-branding-template\") pod \"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.736713 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-error\") pod 
\"7bd56d38-cc74-420e-ab79-f16c8d36638f\" (UID: \"7bd56d38-cc74-420e-ab79-f16c8d36638f\") " Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.737100 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.737446 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.738360 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.738411 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.739135 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.747211 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.747298 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bd56d38-cc74-420e-ab79-f16c8d36638f-kube-api-access-7w7fd" (OuterVolumeSpecName: "kube-api-access-7w7fd") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "kube-api-access-7w7fd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.747500 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.748081 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.759253 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.759385 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.759678 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.759975 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.760733 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "7bd56d38-cc74-420e-ab79-f16c8d36638f" (UID: "7bd56d38-cc74-420e-ab79-f16c8d36638f"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837781 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837823 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837838 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837852 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837866 4940 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837879 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837892 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837905 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837919 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837932 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837944 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837956 4940 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7bd56d38-cc74-420e-ab79-f16c8d36638f-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837969 4940 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7bd56d38-cc74-420e-ab79-f16c8d36638f-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.837982 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7w7fd\" (UniqueName: \"kubernetes.io/projected/7bd56d38-cc74-420e-ab79-f16c8d36638f-kube-api-access-7w7fd\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.968548 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2kv29"] Nov 26 06:58:13 crc kubenswrapper[4940]: I1126 06:58:13.975397 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2kv29"] Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.173346 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bd56d38-cc74-420e-ab79-f16c8d36638f" path="/var/lib/kubelet/pods/7bd56d38-cc74-420e-ab79-f16c8d36638f/volumes" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.480566 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-95988dd86-htfcv"] Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.481195 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.481227 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.481249 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerName="extract-content" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.481261 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerName="extract-content" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.481283 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" containerName="extract-utilities" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.481296 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" containerName="extract-utilities" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.481432 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64de0c82-8255-4813-a615-e10e81aeede1" containerName="extract-utilities" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482121 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="64de0c82-8255-4813-a615-e10e81aeede1" containerName="extract-utilities" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482241 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" containerName="extract-content" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482271 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" containerName="extract-content" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482296 4940 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="7bd56d38-cc74-420e-ab79-f16c8d36638f" containerName="oauth-openshift" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482317 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd56d38-cc74-420e-ab79-f16c8d36638f" containerName="oauth-openshift" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482343 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64de0c82-8255-4813-a615-e10e81aeede1" containerName="extract-content" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482359 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="64de0c82-8255-4813-a615-e10e81aeede1" containerName="extract-content" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482378 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482393 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482415 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" containerName="extract-utilities" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482430 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" containerName="extract-utilities" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482464 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" containerName="extract-content" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482482 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" containerName="extract-content" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482509 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64de0c82-8255-4813-a615-e10e81aeede1" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482525 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="64de0c82-8255-4813-a615-e10e81aeede1" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482546 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerName="extract-utilities" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482563 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerName="extract-utilities" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482588 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d6b3bdc-1843-46fd-adc6-e697f613d9ac" containerName="pruner" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482603 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d6b3bdc-1843-46fd-adc6-e697f613d9ac" containerName="pruner" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482628 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e653739e-f6bf-4bb4-9b62-0d7d5ffb0736" containerName="pruner" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482646 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e653739e-f6bf-4bb4-9b62-0d7d5ffb0736" containerName="pruner" Nov 26 06:58:15 crc kubenswrapper[4940]: E1126 06:58:15.482666 4940 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.482681 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.483091 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b1f2008-3b1a-4665-8192-64c225577aea" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.483132 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5263e4cf-d56e-46cf-bc5b-dfcad517fb81" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.483157 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d6b3bdc-1843-46fd-adc6-e697f613d9ac" containerName="pruner" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.483186 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="245d45a4-2775-4470-a35b-92b1c870fd35" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.483210 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e653739e-f6bf-4bb4-9b62-0d7d5ffb0736" containerName="pruner" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.483229 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="64de0c82-8255-4813-a615-e10e81aeede1" containerName="registry-server" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.483253 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bd56d38-cc74-420e-ab79-f16c8d36638f" containerName="oauth-openshift" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.484101 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.487674 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.488005 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.488146 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.489208 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.489487 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.489786 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.490023 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.490243 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.490490 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.490661 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.490700 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.490769 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.505010 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-95988dd86-htfcv"] Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.520308 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.526464 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.543485 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559378 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " 
pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559429 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-cliconfig\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559454 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559478 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-service-ca\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559520 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-serving-cert\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559579 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-login\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559680 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559734 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dscm9\" (UniqueName: \"kubernetes.io/projected/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-kube-api-access-dscm9\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559767 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-router-certs\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.559936 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-audit-dir\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.560082 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-session\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.560156 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-audit-policies\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.560219 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.560285 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-error\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.663632 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-audit-policies\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.663709 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.663759 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" 
(UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-error\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.663815 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.663852 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-cliconfig\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.663891 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.663944 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-service-ca\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.664010 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-serving-cert\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.664089 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-login\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.664131 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.664171 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dscm9\" 
(UniqueName: \"kubernetes.io/projected/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-kube-api-access-dscm9\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.664206 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-router-certs\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.664266 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-audit-dir\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.664320 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-session\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.665190 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-audit-policies\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.666359 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-audit-dir\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.667611 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.668393 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-cliconfig\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.668920 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-service-ca\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: 
\"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.671659 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-router-certs\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.671706 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-login\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.672018 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-session\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.672305 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.676965 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.677657 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.682655 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-user-template-error\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.683783 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-v4-0-config-system-serving-cert\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " 
pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.706016 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dscm9\" (UniqueName: \"kubernetes.io/projected/f73d2c10-00b8-4fed-9c0c-717a0bdbb160-kube-api-access-dscm9\") pod \"oauth-openshift-95988dd86-htfcv\" (UID: \"f73d2c10-00b8-4fed-9c0c-717a0bdbb160\") " pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:15 crc kubenswrapper[4940]: I1126 06:58:15.842309 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:16 crc kubenswrapper[4940]: I1126 06:58:16.318832 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-95988dd86-htfcv"] Nov 26 06:58:16 crc kubenswrapper[4940]: I1126 06:58:16.649458 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" event={"ID":"f73d2c10-00b8-4fed-9c0c-717a0bdbb160","Type":"ContainerStarted","Data":"61d23d5d610632cbfaf0d5400c794c67fdde53eba680094fe2e4de16277517e5"} Nov 26 06:58:16 crc kubenswrapper[4940]: I1126 06:58:16.649507 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" event={"ID":"f73d2c10-00b8-4fed-9c0c-717a0bdbb160","Type":"ContainerStarted","Data":"4ae7d0c78520934659420de3d7bf498bee15ac5fa118772164bfc8ef9cd0cd43"} Nov 26 06:58:16 crc kubenswrapper[4940]: I1126 06:58:16.649746 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:17 crc kubenswrapper[4940]: I1126 06:58:17.025407 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" Nov 26 06:58:17 crc kubenswrapper[4940]: I1126 06:58:17.050376 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-95988dd86-htfcv" podStartSLOduration=29.050355908 podStartE2EDuration="29.050355908s" podCreationTimestamp="2025-11-26 06:57:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:58:16.673236337 +0000 UTC m=+198.193377966" watchObservedRunningTime="2025-11-26 06:58:17.050355908 +0000 UTC m=+198.570497527" Nov 26 06:58:17 crc kubenswrapper[4940]: I1126 06:58:17.311116 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:58:21 crc kubenswrapper[4940]: I1126 06:58:21.728444 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:58:21 crc kubenswrapper[4940]: I1126 06:58:21.728825 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:58:21 crc kubenswrapper[4940]: I1126 06:58:21.728896 4940 kubelet.go:2542] "SyncLoop 
(probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 06:58:21 crc kubenswrapper[4940]: I1126 06:58:21.729771 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:58:21 crc kubenswrapper[4940]: I1126 06:58:21.729855 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb" gracePeriod=600 Nov 26 06:58:22 crc kubenswrapper[4940]: I1126 06:58:22.690239 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb" exitCode=0 Nov 26 06:58:22 crc kubenswrapper[4940]: I1126 06:58:22.690293 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb"} Nov 26 06:58:22 crc kubenswrapper[4940]: I1126 06:58:22.690899 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"b3655cde675d2cf9e1ba94c8eb1aa758ad65396882a59af55936e02bf229c62e"} Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.844231 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c9xgx"] Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.844961 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c9xgx" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" containerName="registry-server" containerID="cri-o://ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc" gracePeriod=30 Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.859650 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n8sjf"] Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.860629 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n8sjf" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="registry-server" containerID="cri-o://b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6" gracePeriod=30 Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.868535 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-294qn"] Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.868725 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" podUID="f6bef010-718c-468f-bc70-2424cd10e735" containerName="marketplace-operator" containerID="cri-o://4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650" gracePeriod=30 Nov 26 06:58:33 crc 
kubenswrapper[4940]: I1126 06:58:33.877803 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nn64p"] Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.878016 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nn64p" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerName="registry-server" containerID="cri-o://edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587" gracePeriod=30 Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.890089 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xkfvk"] Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.890454 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xkfvk" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerName="registry-server" containerID="cri-o://a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5" gracePeriod=30 Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.895561 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-r5797"] Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.899232 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.906790 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-r5797"] Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.966775 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/de960209-97f8-4192-8dce-db459972eede-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.966837 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8k22\" (UniqueName: \"kubernetes.io/projected/de960209-97f8-4192-8dce-db459972eede-kube-api-access-l8k22\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:33 crc kubenswrapper[4940]: I1126 06:58:33.966898 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de960209-97f8-4192-8dce-db459972eede-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:33 crc kubenswrapper[4940]: E1126 06:58:33.984183 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6 is running failed: container process not found" containerID="b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 06:58:33 crc kubenswrapper[4940]: E1126 06:58:33.984682 4940 log.go:32] "ExecSync cmd from runtime 
service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6 is running failed: container process not found" containerID="b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 06:58:33 crc kubenswrapper[4940]: E1126 06:58:33.985158 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6 is running failed: container process not found" containerID="b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 06:58:33 crc kubenswrapper[4940]: E1126 06:58:33.985194 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-n8sjf" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="registry-server" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.068308 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8k22\" (UniqueName: \"kubernetes.io/projected/de960209-97f8-4192-8dce-db459972eede-kube-api-access-l8k22\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.069400 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de960209-97f8-4192-8dce-db459972eede-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.069453 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/de960209-97f8-4192-8dce-db459972eede-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.072289 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de960209-97f8-4192-8dce-db459972eede-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.077344 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/de960209-97f8-4192-8dce-db459972eede-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.086324 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-l8k22\" (UniqueName: \"kubernetes.io/projected/de960209-97f8-4192-8dce-db459972eede-kube-api-access-l8k22\") pod \"marketplace-operator-79b997595-r5797\" (UID: \"de960209-97f8-4192-8dce-db459972eede\") " pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.283496 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.287163 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.294371 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.313789 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.316441 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nn64p" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.317003 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xkfvk" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374462 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-operator-metrics\") pod \"f6bef010-718c-468f-bc70-2424cd10e735\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374514 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-catalog-content\") pod \"358bc8db-3143-435e-8d21-00fa78fa3029\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374540 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbgb4\" (UniqueName: \"kubernetes.io/projected/965f184e-b9c1-4f5a-a51e-c5e36466002d-kube-api-access-bbgb4\") pod \"965f184e-b9c1-4f5a-a51e-c5e36466002d\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374567 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-catalog-content\") pod \"965f184e-b9c1-4f5a-a51e-c5e36466002d\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374659 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-utilities\") pod \"965f184e-b9c1-4f5a-a51e-c5e36466002d\" (UID: \"965f184e-b9c1-4f5a-a51e-c5e36466002d\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374689 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-catalog-content\") pod \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374739 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-catalog-content\") pod \"1927422c-3af6-418b-ba25-2cbecefd45ad\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374766 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qsc9\" (UniqueName: \"kubernetes.io/projected/9fd9ac3b-989c-49b5-b377-e63aef7fd979-kube-api-access-5qsc9\") pod \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374793 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzgfb\" (UniqueName: \"kubernetes.io/projected/1927422c-3af6-418b-ba25-2cbecefd45ad-kube-api-access-qzgfb\") pod \"1927422c-3af6-418b-ba25-2cbecefd45ad\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374819 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slglm\" (UniqueName: \"kubernetes.io/projected/f6bef010-718c-468f-bc70-2424cd10e735-kube-api-access-slglm\") pod \"f6bef010-718c-468f-bc70-2424cd10e735\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374852 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca\") pod \"f6bef010-718c-468f-bc70-2424cd10e735\" (UID: \"f6bef010-718c-468f-bc70-2424cd10e735\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374878 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-utilities\") pod \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\" (UID: \"9fd9ac3b-989c-49b5-b377-e63aef7fd979\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374922 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-utilities\") pod \"358bc8db-3143-435e-8d21-00fa78fa3029\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374952 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-utilities\") pod \"1927422c-3af6-418b-ba25-2cbecefd45ad\" (UID: \"1927422c-3af6-418b-ba25-2cbecefd45ad\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.374980 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhqlh\" (UniqueName: \"kubernetes.io/projected/358bc8db-3143-435e-8d21-00fa78fa3029-kube-api-access-vhqlh\") pod \"358bc8db-3143-435e-8d21-00fa78fa3029\" (UID: \"358bc8db-3143-435e-8d21-00fa78fa3029\") " Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.377011 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-utilities" (OuterVolumeSpecName: "utilities") pod "965f184e-b9c1-4f5a-a51e-c5e36466002d" (UID: "965f184e-b9c1-4f5a-a51e-c5e36466002d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.378299 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-utilities" (OuterVolumeSpecName: "utilities") pod "358bc8db-3143-435e-8d21-00fa78fa3029" (UID: "358bc8db-3143-435e-8d21-00fa78fa3029"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.380292 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-utilities" (OuterVolumeSpecName: "utilities") pod "1927422c-3af6-418b-ba25-2cbecefd45ad" (UID: "1927422c-3af6-418b-ba25-2cbecefd45ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.380574 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/358bc8db-3143-435e-8d21-00fa78fa3029-kube-api-access-vhqlh" (OuterVolumeSpecName: "kube-api-access-vhqlh") pod "358bc8db-3143-435e-8d21-00fa78fa3029" (UID: "358bc8db-3143-435e-8d21-00fa78fa3029"). InnerVolumeSpecName "kube-api-access-vhqlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.381506 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6bef010-718c-468f-bc70-2424cd10e735-kube-api-access-slglm" (OuterVolumeSpecName: "kube-api-access-slglm") pod "f6bef010-718c-468f-bc70-2424cd10e735" (UID: "f6bef010-718c-468f-bc70-2424cd10e735"). InnerVolumeSpecName "kube-api-access-slglm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.381855 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fd9ac3b-989c-49b5-b377-e63aef7fd979-kube-api-access-5qsc9" (OuterVolumeSpecName: "kube-api-access-5qsc9") pod "9fd9ac3b-989c-49b5-b377-e63aef7fd979" (UID: "9fd9ac3b-989c-49b5-b377-e63aef7fd979"). InnerVolumeSpecName "kube-api-access-5qsc9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.382754 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-utilities" (OuterVolumeSpecName: "utilities") pod "9fd9ac3b-989c-49b5-b377-e63aef7fd979" (UID: "9fd9ac3b-989c-49b5-b377-e63aef7fd979"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.383054 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "f6bef010-718c-468f-bc70-2424cd10e735" (UID: "f6bef010-718c-468f-bc70-2424cd10e735"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.383201 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/965f184e-b9c1-4f5a-a51e-c5e36466002d-kube-api-access-bbgb4" (OuterVolumeSpecName: "kube-api-access-bbgb4") pod "965f184e-b9c1-4f5a-a51e-c5e36466002d" (UID: "965f184e-b9c1-4f5a-a51e-c5e36466002d"). InnerVolumeSpecName "kube-api-access-bbgb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.383802 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "f6bef010-718c-468f-bc70-2424cd10e735" (UID: "f6bef010-718c-468f-bc70-2424cd10e735"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.390843 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1927422c-3af6-418b-ba25-2cbecefd45ad-kube-api-access-qzgfb" (OuterVolumeSpecName: "kube-api-access-qzgfb") pod "1927422c-3af6-418b-ba25-2cbecefd45ad" (UID: "1927422c-3af6-418b-ba25-2cbecefd45ad"). InnerVolumeSpecName "kube-api-access-qzgfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.422828 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "965f184e-b9c1-4f5a-a51e-c5e36466002d" (UID: "965f184e-b9c1-4f5a-a51e-c5e36466002d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.442217 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1927422c-3af6-418b-ba25-2cbecefd45ad" (UID: "1927422c-3af6-418b-ba25-2cbecefd45ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.444266 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "358bc8db-3143-435e-8d21-00fa78fa3029" (UID: "358bc8db-3143-435e-8d21-00fa78fa3029"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476491 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slglm\" (UniqueName: \"kubernetes.io/projected/f6bef010-718c-468f-bc70-2424cd10e735-kube-api-access-slglm\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476524 4940 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476538 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476552 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476563 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476574 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhqlh\" (UniqueName: \"kubernetes.io/projected/358bc8db-3143-435e-8d21-00fa78fa3029-kube-api-access-vhqlh\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476584 4940 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f6bef010-718c-468f-bc70-2424cd10e735-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476595 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/358bc8db-3143-435e-8d21-00fa78fa3029-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476605 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbgb4\" (UniqueName: \"kubernetes.io/projected/965f184e-b9c1-4f5a-a51e-c5e36466002d-kube-api-access-bbgb4\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476615 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476625 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965f184e-b9c1-4f5a-a51e-c5e36466002d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476635 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1927422c-3af6-418b-ba25-2cbecefd45ad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476645 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qsc9\" (UniqueName: 
\"kubernetes.io/projected/9fd9ac3b-989c-49b5-b377-e63aef7fd979-kube-api-access-5qsc9\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.476655 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzgfb\" (UniqueName: \"kubernetes.io/projected/1927422c-3af6-418b-ba25-2cbecefd45ad-kube-api-access-qzgfb\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.526182 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-r5797"] Nov 26 06:58:34 crc kubenswrapper[4940]: W1126 06:58:34.541231 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde960209_97f8_4192_8dce_db459972eede.slice/crio-da801530bb0352f17dbee0de72c84a4f8803baac2b08e305d3b37a6ad7a189cc WatchSource:0}: Error finding container da801530bb0352f17dbee0de72c84a4f8803baac2b08e305d3b37a6ad7a189cc: Status 404 returned error can't find the container with id da801530bb0352f17dbee0de72c84a4f8803baac2b08e305d3b37a6ad7a189cc Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.556639 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9fd9ac3b-989c-49b5-b377-e63aef7fd979" (UID: "9fd9ac3b-989c-49b5-b377-e63aef7fd979"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.578205 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fd9ac3b-989c-49b5-b377-e63aef7fd979-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.751787 4940 generic.go:334] "Generic (PLEG): container finished" podID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerID="b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6" exitCode=0 Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.751858 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n8sjf" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.751861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n8sjf" event={"ID":"1927422c-3af6-418b-ba25-2cbecefd45ad","Type":"ContainerDied","Data":"b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.752005 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n8sjf" event={"ID":"1927422c-3af6-418b-ba25-2cbecefd45ad","Type":"ContainerDied","Data":"45d3336f5556f20cb62d0942a1ad7c514b563d2bb6a400b0755dd2ec5217a475"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.752027 4940 scope.go:117] "RemoveContainer" containerID="b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.753348 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-r5797" event={"ID":"de960209-97f8-4192-8dce-db459972eede","Type":"ContainerStarted","Data":"8378e36dd04feb282feddafd58811e6d31a1eb2454b6abe3d89d92f36a6e7835"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.753364 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-r5797" event={"ID":"de960209-97f8-4192-8dce-db459972eede","Type":"ContainerStarted","Data":"da801530bb0352f17dbee0de72c84a4f8803baac2b08e305d3b37a6ad7a189cc"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.753501 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.754739 4940 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-r5797 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" start-of-body= Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.754769 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-r5797" podUID="de960209-97f8-4192-8dce-db459972eede" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.754960 4940 generic.go:334] "Generic (PLEG): container finished" podID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerID="a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5" exitCode=0 Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.754985 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xkfvk" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.755020 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xkfvk" event={"ID":"9fd9ac3b-989c-49b5-b377-e63aef7fd979","Type":"ContainerDied","Data":"a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.755057 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xkfvk" event={"ID":"9fd9ac3b-989c-49b5-b377-e63aef7fd979","Type":"ContainerDied","Data":"649f3221cba63e93c9375cf7796db9bbf1f2d279ec23be1b02e78878645df9d8"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.756548 4940 generic.go:334] "Generic (PLEG): container finished" podID="f6bef010-718c-468f-bc70-2424cd10e735" containerID="4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650" exitCode=0 Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.756581 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.756603 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" event={"ID":"f6bef010-718c-468f-bc70-2424cd10e735","Type":"ContainerDied","Data":"4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.756658 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-294qn" event={"ID":"f6bef010-718c-468f-bc70-2424cd10e735","Type":"ContainerDied","Data":"83cd923b885c7e96d9c7a624b41c02dab825a7a926e31e9b2da12a4960230083"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.761498 4940 generic.go:334] "Generic (PLEG): container finished" podID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerID="edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587" exitCode=0 Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.761562 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nn64p" event={"ID":"965f184e-b9c1-4f5a-a51e-c5e36466002d","Type":"ContainerDied","Data":"edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.761588 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nn64p" event={"ID":"965f184e-b9c1-4f5a-a51e-c5e36466002d","Type":"ContainerDied","Data":"9673054b3faa945a2fc78d894c6833256b301cb1b110d6b1f425a61e0dffacfc"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.761665 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nn64p" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.765335 4940 generic.go:334] "Generic (PLEG): container finished" podID="358bc8db-3143-435e-8d21-00fa78fa3029" containerID="ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc" exitCode=0 Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.765415 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9xgx" event={"ID":"358bc8db-3143-435e-8d21-00fa78fa3029","Type":"ContainerDied","Data":"ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.765511 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c9xgx" event={"ID":"358bc8db-3143-435e-8d21-00fa78fa3029","Type":"ContainerDied","Data":"7032b5fe8da6019e36bdb8d2d9b3ed0a607726f3bf61c7b8761f5189bc0bec6b"} Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.765627 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c9xgx" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.781414 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-r5797" podStartSLOduration=1.781398714 podStartE2EDuration="1.781398714s" podCreationTimestamp="2025-11-26 06:58:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:58:34.773340447 +0000 UTC m=+216.293482066" watchObservedRunningTime="2025-11-26 06:58:34.781398714 +0000 UTC m=+216.301540323" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.793745 4940 scope.go:117] "RemoveContainer" containerID="3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.809780 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-294qn"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.812368 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-294qn"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.821772 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nn64p"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.822407 4940 scope.go:117] "RemoveContainer" containerID="6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.825383 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nn64p"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.835866 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c9xgx"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.838093 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c9xgx"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.848226 4940 scope.go:117] "RemoveContainer" containerID="b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.848321 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xkfvk"] Nov 26 06:58:34 crc 
kubenswrapper[4940]: E1126 06:58:34.849289 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6\": container with ID starting with b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6 not found: ID does not exist" containerID="b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.849343 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6"} err="failed to get container status \"b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6\": rpc error: code = NotFound desc = could not find container \"b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6\": container with ID starting with b460caddf93b65b21a46455afce59657a69fac1e06b96c97ff6f8a2a7b6224b6 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.849378 4940 scope.go:117] "RemoveContainer" containerID="3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.850254 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3\": container with ID starting with 3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3 not found: ID does not exist" containerID="3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.850285 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3"} err="failed to get container status \"3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3\": rpc error: code = NotFound desc = could not find container \"3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3\": container with ID starting with 3d2e0e9da930243498de07d09e9e5831a75a399b7911ffe225a3201142a9c7c3 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.850310 4940 scope.go:117] "RemoveContainer" containerID="6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.850567 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438\": container with ID starting with 6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438 not found: ID does not exist" containerID="6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.850596 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438"} err="failed to get container status \"6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438\": rpc error: code = NotFound desc = could not find container \"6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438\": container with ID starting with 6ec93e5a854040fb1f02257efbb080609cdad43992cc4201525be6c2e1844438 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: 
I1126 06:58:34.850612 4940 scope.go:117] "RemoveContainer" containerID="a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.854247 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xkfvk"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.863464 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n8sjf"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.866183 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n8sjf"] Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.873566 4940 scope.go:117] "RemoveContainer" containerID="ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.886572 4940 scope.go:117] "RemoveContainer" containerID="d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.899979 4940 scope.go:117] "RemoveContainer" containerID="a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.900486 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5\": container with ID starting with a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5 not found: ID does not exist" containerID="a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.900527 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5"} err="failed to get container status \"a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5\": rpc error: code = NotFound desc = could not find container \"a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5\": container with ID starting with a7ac82751e51db52082005b7ab36c4cb6f51568d961f38eabaf80e690c508ad5 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.900730 4940 scope.go:117] "RemoveContainer" containerID="ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.901702 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0\": container with ID starting with ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0 not found: ID does not exist" containerID="ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.901734 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0"} err="failed to get container status \"ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0\": rpc error: code = NotFound desc = could not find container \"ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0\": container with ID starting with ec5ba4fb813e1daf4018bbba938d200ef53e508f8c69a0382dd8b761613c1ee0 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.901755 4940 
scope.go:117] "RemoveContainer" containerID="d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.901993 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee\": container with ID starting with d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee not found: ID does not exist" containerID="d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.902018 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee"} err="failed to get container status \"d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee\": rpc error: code = NotFound desc = could not find container \"d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee\": container with ID starting with d4c3e121b97e22ae0d4bc9b90930cf618f371fb82256279eda4fc0b6cbdd7eee not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.902029 4940 scope.go:117] "RemoveContainer" containerID="4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.915367 4940 scope.go:117] "RemoveContainer" containerID="4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.915810 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650\": container with ID starting with 4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650 not found: ID does not exist" containerID="4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.915861 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650"} err="failed to get container status \"4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650\": rpc error: code = NotFound desc = could not find container \"4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650\": container with ID starting with 4f3d48b4b15cf3153398dfc1f7b77ed1476cbbae9832d8757ba14253cacc1650 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.915893 4940 scope.go:117] "RemoveContainer" containerID="edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.927131 4940 scope.go:117] "RemoveContainer" containerID="089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.941354 4940 scope.go:117] "RemoveContainer" containerID="87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.957592 4940 scope.go:117] "RemoveContainer" containerID="edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.961172 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587\": container with ID starting with edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587 not found: ID does not exist" containerID="edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.961218 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587"} err="failed to get container status \"edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587\": rpc error: code = NotFound desc = could not find container \"edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587\": container with ID starting with edb0b28cd935f1c6c83e42b3571c7a69b087c2e1c5d26521e7500352852aa587 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.961252 4940 scope.go:117] "RemoveContainer" containerID="089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.961893 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550\": container with ID starting with 089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550 not found: ID does not exist" containerID="089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.961943 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550"} err="failed to get container status \"089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550\": rpc error: code = NotFound desc = could not find container \"089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550\": container with ID starting with 089c9e9f90de2ffc2ed4443307ee2be31575156d1ed8f4dfdc44a86b5a96e550 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.962434 4940 scope.go:117] "RemoveContainer" containerID="87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02" Nov 26 06:58:34 crc kubenswrapper[4940]: E1126 06:58:34.967746 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02\": container with ID starting with 87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02 not found: ID does not exist" containerID="87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.967784 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02"} err="failed to get container status \"87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02\": rpc error: code = NotFound desc = could not find container \"87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02\": container with ID starting with 87de476e0b10c4c6a004fd54197072dfbd6d560180e2050161023193074e0d02 not found: ID does not exist" Nov 26 06:58:34 crc kubenswrapper[4940]: I1126 06:58:34.967811 4940 scope.go:117] "RemoveContainer" containerID="ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc" Nov 26 06:58:34 crc 
kubenswrapper[4940]: I1126 06:58:34.990356 4940 scope.go:117] "RemoveContainer" containerID="2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.015170 4940 scope.go:117] "RemoveContainer" containerID="9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.050364 4940 scope.go:117] "RemoveContainer" containerID="ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc" Nov 26 06:58:35 crc kubenswrapper[4940]: E1126 06:58:35.050850 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc\": container with ID starting with ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc not found: ID does not exist" containerID="ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.050901 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc"} err="failed to get container status \"ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc\": rpc error: code = NotFound desc = could not find container \"ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc\": container with ID starting with ac5886e0db79de052fe7abf6d1d9550dafd0b401f0ab7ed6302aad6acf71f0cc not found: ID does not exist" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.050936 4940 scope.go:117] "RemoveContainer" containerID="2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90" Nov 26 06:58:35 crc kubenswrapper[4940]: E1126 06:58:35.051357 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90\": container with ID starting with 2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90 not found: ID does not exist" containerID="2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.051391 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90"} err="failed to get container status \"2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90\": rpc error: code = NotFound desc = could not find container \"2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90\": container with ID starting with 2455bf3f97738170a7daac5f5979cf7a50392ccedfca638d96b0ee72907dab90 not found: ID does not exist" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.051414 4940 scope.go:117] "RemoveContainer" containerID="9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313" Nov 26 06:58:35 crc kubenswrapper[4940]: E1126 06:58:35.051669 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313\": container with ID starting with 9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313 not found: ID does not exist" containerID="9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.051700 4940 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313"} err="failed to get container status \"9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313\": rpc error: code = NotFound desc = could not find container \"9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313\": container with ID starting with 9353cb53e91511517565eaf895cddf2cc85310a1e361a2f3c83fd2a8016fd313 not found: ID does not exist" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.172495 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" path="/var/lib/kubelet/pods/1927422c-3af6-418b-ba25-2cbecefd45ad/volumes" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.173115 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" path="/var/lib/kubelet/pods/358bc8db-3143-435e-8d21-00fa78fa3029/volumes" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.173651 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" path="/var/lib/kubelet/pods/965f184e-b9c1-4f5a-a51e-c5e36466002d/volumes" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.174201 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" path="/var/lib/kubelet/pods/9fd9ac3b-989c-49b5-b377-e63aef7fd979/volumes" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.174768 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6bef010-718c-468f-bc70-2424cd10e735" path="/var/lib/kubelet/pods/f6bef010-718c-468f-bc70-2424cd10e735/volumes" Nov 26 06:58:35 crc kubenswrapper[4940]: I1126 06:58:35.776748 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-r5797" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.063682 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jpxgr"] Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.065355 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerName="extract-utilities" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.065468 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerName="extract-utilities" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.065550 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="extract-utilities" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.065661 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="extract-utilities" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.065748 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="extract-content" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.065824 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="extract-content" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.065899 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" containerName="extract-utilities" Nov 26 06:58:36 crc 
kubenswrapper[4940]: I1126 06:58:36.066000 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" containerName="extract-utilities" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.066100 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerName="extract-content" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.066198 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerName="extract-content" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.066284 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6bef010-718c-468f-bc70-2424cd10e735" containerName="marketplace-operator" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.066362 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6bef010-718c-468f-bc70-2424cd10e735" containerName="marketplace-operator" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.066441 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerName="registry-server" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.066527 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerName="registry-server" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.066609 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="registry-server" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.066690 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="registry-server" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.066765 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerName="extract-content" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.066834 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerName="extract-content" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.066953 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerName="extract-utilities" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.067209 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerName="extract-utilities" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.067282 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" containerName="extract-content" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.067334 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" containerName="extract-content" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.067400 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerName="registry-server" Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.067451 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerName="registry-server" Nov 26 06:58:36 crc kubenswrapper[4940]: E1126 06:58:36.067507 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" 
containerName="registry-server"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.067576 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" containerName="registry-server"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.067727 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="965f184e-b9c1-4f5a-a51e-c5e36466002d" containerName="registry-server"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.067790 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="358bc8db-3143-435e-8d21-00fa78fa3029" containerName="registry-server"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.067849 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fd9ac3b-989c-49b5-b377-e63aef7fd979" containerName="registry-server"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.067909 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1927422c-3af6-418b-ba25-2cbecefd45ad" containerName="registry-server"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.068001 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6bef010-718c-468f-bc70-2424cd10e735" containerName="marketplace-operator"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.068767 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.071111 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.081271 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpxgr"]
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.093676 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dj67r\" (UniqueName: \"kubernetes.io/projected/ee57acbd-ac45-48fa-9ced-dbf75469d3db-kube-api-access-dj67r\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.093747 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee57acbd-ac45-48fa-9ced-dbf75469d3db-utilities\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.093831 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee57acbd-ac45-48fa-9ced-dbf75469d3db-catalog-content\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.194784 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dj67r\" (UniqueName: \"kubernetes.io/projected/ee57acbd-ac45-48fa-9ced-dbf75469d3db-kube-api-access-dj67r\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.195300 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee57acbd-ac45-48fa-9ced-dbf75469d3db-utilities\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.195438 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee57acbd-ac45-48fa-9ced-dbf75469d3db-catalog-content\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.195828 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee57acbd-ac45-48fa-9ced-dbf75469d3db-catalog-content\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.196133 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee57acbd-ac45-48fa-9ced-dbf75469d3db-utilities\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.216483 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dj67r\" (UniqueName: \"kubernetes.io/projected/ee57acbd-ac45-48fa-9ced-dbf75469d3db-kube-api-access-dj67r\") pod \"redhat-marketplace-jpxgr\" (UID: \"ee57acbd-ac45-48fa-9ced-dbf75469d3db\") " pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.259226 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ghgss"]
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.261102 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.264450 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.272807 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ghgss"]
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.296089 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ht9t2\" (UniqueName: \"kubernetes.io/projected/2600a6f3-fe01-4cca-9e56-a8981611d6e9-kube-api-access-ht9t2\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.296266 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2600a6f3-fe01-4cca-9e56-a8981611d6e9-catalog-content\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.296366 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2600a6f3-fe01-4cca-9e56-a8981611d6e9-utilities\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.387259 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.397933 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ht9t2\" (UniqueName: \"kubernetes.io/projected/2600a6f3-fe01-4cca-9e56-a8981611d6e9-kube-api-access-ht9t2\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.397986 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2600a6f3-fe01-4cca-9e56-a8981611d6e9-catalog-content\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.398021 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2600a6f3-fe01-4cca-9e56-a8981611d6e9-utilities\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.398499 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2600a6f3-fe01-4cca-9e56-a8981611d6e9-utilities\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.398751 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2600a6f3-fe01-4cca-9e56-a8981611d6e9-catalog-content\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.421582 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ht9t2\" (UniqueName: \"kubernetes.io/projected/2600a6f3-fe01-4cca-9e56-a8981611d6e9-kube-api-access-ht9t2\") pod \"certified-operators-ghgss\" (UID: \"2600a6f3-fe01-4cca-9e56-a8981611d6e9\") " pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.595339 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.798004 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpxgr"]
Nov 26 06:58:36 crc kubenswrapper[4940]: I1126 06:58:36.847004 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ghgss"]
Nov 26 06:58:36 crc kubenswrapper[4940]: W1126 06:58:36.854455 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2600a6f3_fe01_4cca_9e56_a8981611d6e9.slice/crio-f11f7fa39f51b27ca7a75528d33f8884968ee8ac8da7347d4fae3b021be2cf1c WatchSource:0}: Error finding container f11f7fa39f51b27ca7a75528d33f8884968ee8ac8da7347d4fae3b021be2cf1c: Status 404 returned error can't find the container with id f11f7fa39f51b27ca7a75528d33f8884968ee8ac8da7347d4fae3b021be2cf1c
Nov 26 06:58:37 crc kubenswrapper[4940]: I1126 06:58:37.787494 4940 generic.go:334] "Generic (PLEG): container finished" podID="ee57acbd-ac45-48fa-9ced-dbf75469d3db" containerID="d3de2dc1e5f85c424dca44242cca060393c0129a337ef481fba61e027ee6ae11" exitCode=0
Nov 26 06:58:37 crc kubenswrapper[4940]: I1126 06:58:37.787590 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpxgr" event={"ID":"ee57acbd-ac45-48fa-9ced-dbf75469d3db","Type":"ContainerDied","Data":"d3de2dc1e5f85c424dca44242cca060393c0129a337ef481fba61e027ee6ae11"}
Nov 26 06:58:37 crc kubenswrapper[4940]: I1126 06:58:37.787809 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpxgr" event={"ID":"ee57acbd-ac45-48fa-9ced-dbf75469d3db","Type":"ContainerStarted","Data":"7f62c9930b6d862c22297584094cb888acad98088d4638f449bc4c2819f629ed"}
Nov 26 06:58:37 crc kubenswrapper[4940]: I1126 06:58:37.790089 4940 generic.go:334] "Generic (PLEG): container finished" podID="2600a6f3-fe01-4cca-9e56-a8981611d6e9" containerID="d391c1610139f7e12b25008c4ee0302ede7e67f538dd0c5aace75dc590807624" exitCode=0
Nov 26 06:58:37 crc kubenswrapper[4940]: I1126 06:58:37.790120 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghgss" event={"ID":"2600a6f3-fe01-4cca-9e56-a8981611d6e9","Type":"ContainerDied","Data":"d391c1610139f7e12b25008c4ee0302ede7e67f538dd0c5aace75dc590807624"}
Nov 26 06:58:37 crc kubenswrapper[4940]: I1126 06:58:37.790137 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghgss" event={"ID":"2600a6f3-fe01-4cca-9e56-a8981611d6e9","Type":"ContainerStarted","Data":"f11f7fa39f51b27ca7a75528d33f8884968ee8ac8da7347d4fae3b021be2cf1c"}
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.480752 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v2ldj"]
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.482310 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.485391 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.485774 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v2ldj"]
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.527245 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e769a8-5ba1-466e-bdbb-01367e025ad1-utilities\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.527303 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e769a8-5ba1-466e-bdbb-01367e025ad1-catalog-content\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.527328 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czmxc\" (UniqueName: \"kubernetes.io/projected/49e769a8-5ba1-466e-bdbb-01367e025ad1-kube-api-access-czmxc\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.628747 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e769a8-5ba1-466e-bdbb-01367e025ad1-utilities\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.628786 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e769a8-5ba1-466e-bdbb-01367e025ad1-catalog-content\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.628808 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czmxc\" (UniqueName: \"kubernetes.io/projected/49e769a8-5ba1-466e-bdbb-01367e025ad1-kube-api-access-czmxc\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.629522 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49e769a8-5ba1-466e-bdbb-01367e025ad1-utilities\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.629734 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49e769a8-5ba1-466e-bdbb-01367e025ad1-catalog-content\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.666657 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lrt8v"]
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.671556 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czmxc\" (UniqueName: \"kubernetes.io/projected/49e769a8-5ba1-466e-bdbb-01367e025ad1-kube-api-access-czmxc\") pod \"redhat-operators-v2ldj\" (UID: \"49e769a8-5ba1-466e-bdbb-01367e025ad1\") " pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.673646 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.674460 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lrt8v"]
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.676427 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.729312 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsc5s\" (UniqueName: \"kubernetes.io/projected/199be261-efb1-49e3-8e6e-f2237eafc202-kube-api-access-hsc5s\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.729382 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/199be261-efb1-49e3-8e6e-f2237eafc202-utilities\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.729418 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/199be261-efb1-49e3-8e6e-f2237eafc202-catalog-content\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.796412 4940 generic.go:334] "Generic (PLEG): container finished" podID="2600a6f3-fe01-4cca-9e56-a8981611d6e9" containerID="df50c3e8c6ca22b61b1bccb06a0181472608328a30074b048daca55bfc5fccc7" exitCode=0
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.796470 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghgss" event={"ID":"2600a6f3-fe01-4cca-9e56-a8981611d6e9","Type":"ContainerDied","Data":"df50c3e8c6ca22b61b1bccb06a0181472608328a30074b048daca55bfc5fccc7"}
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.800649 4940 generic.go:334] "Generic (PLEG): container finished" podID="ee57acbd-ac45-48fa-9ced-dbf75469d3db" containerID="feb7837851c189b18c051d04c9fea20f9dd5494f457427a44b57b70a2410ec2f" exitCode=0
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.800765 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpxgr" event={"ID":"ee57acbd-ac45-48fa-9ced-dbf75469d3db","Type":"ContainerDied","Data":"feb7837851c189b18c051d04c9fea20f9dd5494f457427a44b57b70a2410ec2f"}
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.819375 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.830797 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsc5s\" (UniqueName: \"kubernetes.io/projected/199be261-efb1-49e3-8e6e-f2237eafc202-kube-api-access-hsc5s\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.830996 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/199be261-efb1-49e3-8e6e-f2237eafc202-utilities\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.831158 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/199be261-efb1-49e3-8e6e-f2237eafc202-catalog-content\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.831701 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/199be261-efb1-49e3-8e6e-f2237eafc202-utilities\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.831955 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/199be261-efb1-49e3-8e6e-f2237eafc202-catalog-content\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:38 crc kubenswrapper[4940]: I1126 06:58:38.856077 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsc5s\" (UniqueName: \"kubernetes.io/projected/199be261-efb1-49e3-8e6e-f2237eafc202-kube-api-access-hsc5s\") pod \"community-operators-lrt8v\" (UID: \"199be261-efb1-49e3-8e6e-f2237eafc202\") " pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.056412 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.214722 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v2ldj"]
Nov 26 06:58:39 crc kubenswrapper[4940]: W1126 06:58:39.224184 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49e769a8_5ba1_466e_bdbb_01367e025ad1.slice/crio-eafb5213803edf3216479a9e7990b1791a768108817deeebff30b52e6d17bd19 WatchSource:0}: Error finding container eafb5213803edf3216479a9e7990b1791a768108817deeebff30b52e6d17bd19: Status 404 returned error can't find the container with id eafb5213803edf3216479a9e7990b1791a768108817deeebff30b52e6d17bd19
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.482343 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lrt8v"]
Nov 26 06:58:39 crc kubenswrapper[4940]: W1126 06:58:39.526520 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod199be261_efb1_49e3_8e6e_f2237eafc202.slice/crio-20bd4383074fda0f263facba5eeaaf47ebeeb7707117d7f7c3d3e1de51f0f43a WatchSource:0}: Error finding container 20bd4383074fda0f263facba5eeaaf47ebeeb7707117d7f7c3d3e1de51f0f43a: Status 404 returned error can't find the container with id 20bd4383074fda0f263facba5eeaaf47ebeeb7707117d7f7c3d3e1de51f0f43a
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.807775 4940 generic.go:334] "Generic (PLEG): container finished" podID="49e769a8-5ba1-466e-bdbb-01367e025ad1" containerID="ecaf233e9b5e4bbb74ec811fde2dadf2c454f799d913277577123350d107fb0f" exitCode=0
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.807878 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v2ldj" event={"ID":"49e769a8-5ba1-466e-bdbb-01367e025ad1","Type":"ContainerDied","Data":"ecaf233e9b5e4bbb74ec811fde2dadf2c454f799d913277577123350d107fb0f"}
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.808160 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v2ldj" event={"ID":"49e769a8-5ba1-466e-bdbb-01367e025ad1","Type":"ContainerStarted","Data":"eafb5213803edf3216479a9e7990b1791a768108817deeebff30b52e6d17bd19"}
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.812550 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ghgss" event={"ID":"2600a6f3-fe01-4cca-9e56-a8981611d6e9","Type":"ContainerStarted","Data":"035cfe9c0a779d8817963392988496fe2a0c64b13e51133019d5be76aae28159"}
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.815657 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpxgr" event={"ID":"ee57acbd-ac45-48fa-9ced-dbf75469d3db","Type":"ContainerStarted","Data":"4ef595c5dfac3878b9d35ec1a8060ab61665641c08c0f220647ef3fe9aeaa693"}
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.817542 4940 generic.go:334] "Generic (PLEG): container finished" podID="199be261-efb1-49e3-8e6e-f2237eafc202" containerID="be0e2833b22c4e3beaed0af37440a47b49f06f877df54e0e97a79f970c07bea1" exitCode=0
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.817576 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrt8v" event={"ID":"199be261-efb1-49e3-8e6e-f2237eafc202","Type":"ContainerDied","Data":"be0e2833b22c4e3beaed0af37440a47b49f06f877df54e0e97a79f970c07bea1"}
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.817596 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrt8v" event={"ID":"199be261-efb1-49e3-8e6e-f2237eafc202","Type":"ContainerStarted","Data":"20bd4383074fda0f263facba5eeaaf47ebeeb7707117d7f7c3d3e1de51f0f43a"}
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.867586 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jpxgr" podStartSLOduration=2.346395781 podStartE2EDuration="3.867566541s" podCreationTimestamp="2025-11-26 06:58:36 +0000 UTC" firstStartedPulling="2025-11-26 06:58:37.793154157 +0000 UTC m=+219.313295826" lastFinishedPulling="2025-11-26 06:58:39.314324967 +0000 UTC m=+220.834466586" observedRunningTime="2025-11-26 06:58:39.864792663 +0000 UTC m=+221.384934292" watchObservedRunningTime="2025-11-26 06:58:39.867566541 +0000 UTC m=+221.387708160"
Nov 26 06:58:39 crc kubenswrapper[4940]: I1126 06:58:39.883178 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ghgss" podStartSLOduration=2.37544985 podStartE2EDuration="3.883159979s" podCreationTimestamp="2025-11-26 06:58:36 +0000 UTC" firstStartedPulling="2025-11-26 06:58:37.791492055 +0000 UTC m=+219.311633694" lastFinishedPulling="2025-11-26 06:58:39.299202204 +0000 UTC m=+220.819343823" observedRunningTime="2025-11-26 06:58:39.880101072 +0000 UTC m=+221.400242711" watchObservedRunningTime="2025-11-26 06:58:39.883159979 +0000 UTC m=+221.403301598"
Nov 26 06:58:40 crc kubenswrapper[4940]: I1126 06:58:40.825099 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrt8v" event={"ID":"199be261-efb1-49e3-8e6e-f2237eafc202","Type":"ContainerDied","Data":"fcfad79629ec06a1f1953f421d1b72bdd18ad7c07e67ec3a96be42b78156ae73"}
Nov 26 06:58:40 crc kubenswrapper[4940]: I1126 06:58:40.825456 4940 generic.go:334] "Generic (PLEG): container finished" podID="199be261-efb1-49e3-8e6e-f2237eafc202" containerID="fcfad79629ec06a1f1953f421d1b72bdd18ad7c07e67ec3a96be42b78156ae73" exitCode=0
Nov 26 06:58:40 crc kubenswrapper[4940]: I1126 06:58:40.829738 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v2ldj" event={"ID":"49e769a8-5ba1-466e-bdbb-01367e025ad1","Type":"ContainerStarted","Data":"36c50c2427012e71095a40f407e7e2ac5812c6ca77ae0cf42596f563dcf71af7"}
Nov 26 06:58:41 crc kubenswrapper[4940]: I1126 06:58:41.836548 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lrt8v" event={"ID":"199be261-efb1-49e3-8e6e-f2237eafc202","Type":"ContainerStarted","Data":"746b6d14afea81205d96ae36e41b9193831a2ddfe079e9d67cb60320c554d91f"}
Nov 26 06:58:41 crc kubenswrapper[4940]: I1126 06:58:41.838838 4940 generic.go:334] "Generic (PLEG): container finished" podID="49e769a8-5ba1-466e-bdbb-01367e025ad1" containerID="36c50c2427012e71095a40f407e7e2ac5812c6ca77ae0cf42596f563dcf71af7" exitCode=0
Nov 26 06:58:41 crc kubenswrapper[4940]: I1126 06:58:41.838866 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v2ldj" event={"ID":"49e769a8-5ba1-466e-bdbb-01367e025ad1","Type":"ContainerDied","Data":"36c50c2427012e71095a40f407e7e2ac5812c6ca77ae0cf42596f563dcf71af7"}
Nov 26 06:58:41 crc kubenswrapper[4940]: I1126 06:58:41.855546 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lrt8v" podStartSLOduration=2.449297335 podStartE2EDuration="3.855523825s" podCreationTimestamp="2025-11-26 06:58:38 +0000 UTC" firstStartedPulling="2025-11-26 06:58:39.81866893 +0000 UTC m=+221.338810549" lastFinishedPulling="2025-11-26 06:58:41.22489542 +0000 UTC m=+222.745037039" observedRunningTime="2025-11-26 06:58:41.854484272 +0000 UTC m=+223.374625891" watchObservedRunningTime="2025-11-26 06:58:41.855523825 +0000 UTC m=+223.375665444"
Nov 26 06:58:42 crc kubenswrapper[4940]: I1126 06:58:42.849952 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v2ldj" event={"ID":"49e769a8-5ba1-466e-bdbb-01367e025ad1","Type":"ContainerStarted","Data":"a2491e5cbd29917d91a19db2656fe9b5e95ec163fe5a4492cfedaa9f576e4547"}
Nov 26 06:58:42 crc kubenswrapper[4940]: I1126 06:58:42.882371 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v2ldj" podStartSLOduration=2.463172098 podStartE2EDuration="4.88234467s" podCreationTimestamp="2025-11-26 06:58:38 +0000 UTC" firstStartedPulling="2025-11-26 06:58:39.809413055 +0000 UTC m=+221.329554684" lastFinishedPulling="2025-11-26 06:58:42.228585597 +0000 UTC m=+223.748727256" observedRunningTime="2025-11-26 06:58:42.876215744 +0000 UTC m=+224.396357403" watchObservedRunningTime="2025-11-26 06:58:42.88234467 +0000 UTC m=+224.402486329"
Nov 26 06:58:46 crc kubenswrapper[4940]: I1126 06:58:46.387859 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:46 crc kubenswrapper[4940]: I1126 06:58:46.388700 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:46 crc kubenswrapper[4940]: I1126 06:58:46.446381 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:46 crc kubenswrapper[4940]: I1126 06:58:46.596322 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:46 crc kubenswrapper[4940]: I1126 06:58:46.596543 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:46 crc kubenswrapper[4940]: I1126 06:58:46.637166 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:46 crc kubenswrapper[4940]: I1126 06:58:46.917154 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jpxgr"
Nov 26 06:58:46 crc kubenswrapper[4940]: I1126 06:58:46.922950 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ghgss"
Nov 26 06:58:48 crc kubenswrapper[4940]: I1126 06:58:48.820208 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:48 crc kubenswrapper[4940]: I1126 06:58:48.820576 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:48 crc kubenswrapper[4940]: I1126 06:58:48.887849 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:48 crc kubenswrapper[4940]: I1126 06:58:48.942066 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v2ldj"
Nov 26 06:58:49 crc kubenswrapper[4940]: I1126 06:58:49.057643 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:49 crc kubenswrapper[4940]: I1126 06:58:49.057692 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:49 crc kubenswrapper[4940]: I1126 06:58:49.095949 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 06:58:49 crc kubenswrapper[4940]: I1126 06:58:49.947914 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lrt8v"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.682270 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"]
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.683327 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.687479 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.689553 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.696655 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"]
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.848444 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/acefc5b4-b4c5-474f-a038-b04842446dc9-config-volume\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.848569 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/acefc5b4-b4c5-474f-a038-b04842446dc9-secret-volume\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.848614 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jthqq\" (UniqueName: \"kubernetes.io/projected/acefc5b4-b4c5-474f-a038-b04842446dc9-kube-api-access-jthqq\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.949916 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/acefc5b4-b4c5-474f-a038-b04842446dc9-config-volume\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.950079 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/acefc5b4-b4c5-474f-a038-b04842446dc9-secret-volume\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.951069 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/acefc5b4-b4c5-474f-a038-b04842446dc9-config-volume\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.951153 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jthqq\" (UniqueName: \"kubernetes.io/projected/acefc5b4-b4c5-474f-a038-b04842446dc9-kube-api-access-jthqq\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.956463 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/acefc5b4-b4c5-474f-a038-b04842446dc9-secret-volume\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.974068 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jthqq\" (UniqueName: \"kubernetes.io/projected/acefc5b4-b4c5-474f-a038-b04842446dc9-kube-api-access-jthqq\") pod \"collect-profiles-29402340-chpz5\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:00 crc kubenswrapper[4940]: I1126 07:00:00.998202 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:01 crc kubenswrapper[4940]: I1126 07:00:01.249814 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"]
Nov 26 07:00:01 crc kubenswrapper[4940]: I1126 07:00:01.656724 4940 generic.go:334] "Generic (PLEG): container finished" podID="acefc5b4-b4c5-474f-a038-b04842446dc9" containerID="cb01e0c93687a515dea84d0d2a9c6a96f5132f0e7d9f9c8a2a79b2c6a89fd119" exitCode=0
Nov 26 07:00:01 crc kubenswrapper[4940]: I1126 07:00:01.656772 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5" event={"ID":"acefc5b4-b4c5-474f-a038-b04842446dc9","Type":"ContainerDied","Data":"cb01e0c93687a515dea84d0d2a9c6a96f5132f0e7d9f9c8a2a79b2c6a89fd119"}
Nov 26 07:00:01 crc kubenswrapper[4940]: I1126 07:00:01.656804 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5" event={"ID":"acefc5b4-b4c5-474f-a038-b04842446dc9","Type":"ContainerStarted","Data":"c80ae28eb84525a3b49db7f453404aa6ac4d121c88dc5ceda0018bc66a5f39a0"}
Nov 26 07:00:02 crc kubenswrapper[4940]: I1126 07:00:02.935499 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.084647 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jthqq\" (UniqueName: \"kubernetes.io/projected/acefc5b4-b4c5-474f-a038-b04842446dc9-kube-api-access-jthqq\") pod \"acefc5b4-b4c5-474f-a038-b04842446dc9\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") "
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.085705 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/acefc5b4-b4c5-474f-a038-b04842446dc9-config-volume\") pod \"acefc5b4-b4c5-474f-a038-b04842446dc9\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") "
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.085754 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/acefc5b4-b4c5-474f-a038-b04842446dc9-secret-volume\") pod \"acefc5b4-b4c5-474f-a038-b04842446dc9\" (UID: \"acefc5b4-b4c5-474f-a038-b04842446dc9\") "
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.086356 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/acefc5b4-b4c5-474f-a038-b04842446dc9-config-volume" (OuterVolumeSpecName: "config-volume") pod "acefc5b4-b4c5-474f-a038-b04842446dc9" (UID: "acefc5b4-b4c5-474f-a038-b04842446dc9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.089854 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acefc5b4-b4c5-474f-a038-b04842446dc9-kube-api-access-jthqq" (OuterVolumeSpecName: "kube-api-access-jthqq") pod "acefc5b4-b4c5-474f-a038-b04842446dc9" (UID: "acefc5b4-b4c5-474f-a038-b04842446dc9"). InnerVolumeSpecName "kube-api-access-jthqq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.090978 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acefc5b4-b4c5-474f-a038-b04842446dc9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "acefc5b4-b4c5-474f-a038-b04842446dc9" (UID: "acefc5b4-b4c5-474f-a038-b04842446dc9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.187202 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jthqq\" (UniqueName: \"kubernetes.io/projected/acefc5b4-b4c5-474f-a038-b04842446dc9-kube-api-access-jthqq\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.187235 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/acefc5b4-b4c5-474f-a038-b04842446dc9-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.187243 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/acefc5b4-b4c5-474f-a038-b04842446dc9-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.669382 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5" event={"ID":"acefc5b4-b4c5-474f-a038-b04842446dc9","Type":"ContainerDied","Data":"c80ae28eb84525a3b49db7f453404aa6ac4d121c88dc5ceda0018bc66a5f39a0"}
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.669434 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c80ae28eb84525a3b49db7f453404aa6ac4d121c88dc5ceda0018bc66a5f39a0"
Nov 26 07:00:03 crc kubenswrapper[4940]: I1126 07:00:03.669497 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"
Nov 26 07:00:51 crc kubenswrapper[4940]: I1126 07:00:51.728343 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:00:51 crc kubenswrapper[4940]: I1126 07:00:51.729147 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:01:21 crc kubenswrapper[4940]: I1126 07:01:21.729305 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:01:21 crc kubenswrapper[4940]: I1126 07:01:21.730161 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:01:51 crc kubenswrapper[4940]: I1126 07:01:51.728511 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:01:51 crc kubenswrapper[4940]: I1126 07:01:51.729340 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:01:51 crc kubenswrapper[4940]: I1126 07:01:51.729426 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 07:01:51 crc kubenswrapper[4940]: I1126 07:01:51.730658 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b3655cde675d2cf9e1ba94c8eb1aa758ad65396882a59af55936e02bf229c62e"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 07:01:51 crc kubenswrapper[4940]: I1126 07:01:51.730766 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://b3655cde675d2cf9e1ba94c8eb1aa758ad65396882a59af55936e02bf229c62e" gracePeriod=600
Nov 26 07:01:52 crc kubenswrapper[4940]: I1126 07:01:52.364252 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="b3655cde675d2cf9e1ba94c8eb1aa758ad65396882a59af55936e02bf229c62e" exitCode=0
Nov 26 07:01:52 crc kubenswrapper[4940]: I1126 07:01:52.364353 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"b3655cde675d2cf9e1ba94c8eb1aa758ad65396882a59af55936e02bf229c62e"}
Nov 26 07:01:52 crc kubenswrapper[4940]: I1126 07:01:52.364577 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"3eee2744c5ef6eeac0756de5055d0728c038b9c6865c21fd3c9a2ecfca6cd031"}
Nov 26 07:01:52 crc kubenswrapper[4940]: I1126 07:01:52.364597 4940 scope.go:117] "RemoveContainer" containerID="1c07d9f9e91a10aa564f736e0044ca949295117833af1decb48bfa20043439fb"
Nov 26 07:01:54 crc kubenswrapper[4940]: I1126 07:01:54.993387 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-4r5p4"]
Nov 26 07:01:54 crc kubenswrapper[4940]: E1126 07:01:54.993945 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acefc5b4-b4c5-474f-a038-b04842446dc9" containerName="collect-profiles"
Nov 26 07:01:54 crc kubenswrapper[4940]: I1126 07:01:54.993961 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="acefc5b4-b4c5-474f-a038-b04842446dc9" containerName="collect-profiles"
Nov 26 07:01:54 crc kubenswrapper[4940]: I1126 07:01:54.994098 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="acefc5b4-b4c5-474f-a038-b04842446dc9" containerName="collect-profiles"
Nov 26 07:01:54 crc kubenswrapper[4940]: I1126 07:01:54.994540 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.023147 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-4r5p4"]
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.109101 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.109187 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-bound-sa-token\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.109212 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbt9s\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-kube-api-access-xbt9s\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.109282 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-registry-tls\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.109327 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/aabd5b58-bbf0-4256-8909-4a78aebfb05e-registry-certificates\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.109371 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aabd5b58-bbf0-4256-8909-4a78aebfb05e-trusted-ca\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.109398 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/aabd5b58-bbf0-4256-8909-4a78aebfb05e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.109447 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/aabd5b58-bbf0-4256-8909-4a78aebfb05e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.136843 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.210114 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-registry-tls\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.210205 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/aabd5b58-bbf0-4256-8909-4a78aebfb05e-registry-certificates\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.210238 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aabd5b58-bbf0-4256-8909-4a78aebfb05e-trusted-ca\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.210379 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/aabd5b58-bbf0-4256-8909-4a78aebfb05e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.211260 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/aabd5b58-bbf0-4256-8909-4a78aebfb05e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.211323 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-bound-sa-token\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.211347 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbt9s\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-kube-api-access-xbt9s\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.211515 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/aabd5b58-bbf0-4256-8909-4a78aebfb05e-registry-certificates\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.211876 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/aabd5b58-bbf0-4256-8909-4a78aebfb05e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.211933 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aabd5b58-bbf0-4256-8909-4a78aebfb05e-trusted-ca\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.215887 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/aabd5b58-bbf0-4256-8909-4a78aebfb05e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.215997 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-registry-tls\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.226469 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-bound-sa-token\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.226978 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbt9s\" (UniqueName: \"kubernetes.io/projected/aabd5b58-bbf0-4256-8909-4a78aebfb05e-kube-api-access-xbt9s\") pod \"image-registry-66df7c8f76-4r5p4\" (UID: \"aabd5b58-bbf0-4256-8909-4a78aebfb05e\") " pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.311744 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:55 crc kubenswrapper[4940]: I1126 07:01:55.512185 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-4r5p4"]
Nov 26 07:01:55 crc kubenswrapper[4940]: W1126 07:01:55.517655 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaabd5b58_bbf0_4256_8909_4a78aebfb05e.slice/crio-07b472009c902e3e0bd0afea312f3e6bb6a75d9d7d132df34c439b522ab74052 WatchSource:0}: Error finding container 07b472009c902e3e0bd0afea312f3e6bb6a75d9d7d132df34c439b522ab74052: Status 404 returned error can't find the container with id 07b472009c902e3e0bd0afea312f3e6bb6a75d9d7d132df34c439b522ab74052
Nov 26 07:01:56 crc kubenswrapper[4940]: I1126 07:01:56.392213 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4" event={"ID":"aabd5b58-bbf0-4256-8909-4a78aebfb05e","Type":"ContainerStarted","Data":"b803e3eecb66c4677f7ec6a6c009eb57f3c14506d45577e4dbee9a9748b6095f"}
Nov 26 07:01:56 crc kubenswrapper[4940]: I1126 07:01:56.392815 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:01:56 crc kubenswrapper[4940]: I1126 07:01:56.392833 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4" event={"ID":"aabd5b58-bbf0-4256-8909-4a78aebfb05e","Type":"ContainerStarted","Data":"07b472009c902e3e0bd0afea312f3e6bb6a75d9d7d132df34c439b522ab74052"}
Nov 26 07:01:56 crc kubenswrapper[4940]: I1126 07:01:56.415290 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4" podStartSLOduration=2.415220421 podStartE2EDuration="2.415220421s" podCreationTimestamp="2025-11-26 07:01:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:01:56.410737815 +0000 UTC m=+417.930879444" watchObservedRunningTime="2025-11-26 07:01:56.415220421 +0000 UTC m=+417.935362080"
Nov 26 07:02:15 crc kubenswrapper[4940]: I1126 07:02:15.318507 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-4r5p4"
Nov 26 07:02:15 crc kubenswrapper[4940]: I1126 07:02:15.386635 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sv7gt"]
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.442093 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" podUID="efe355d7-3ba1-451e-aebd-271c367e186c" containerName="registry" containerID="cri-o://a876ed16c09326ce9b2409438a1818e2d5dec88020d8fc0cfaa071b6f74bede0" gracePeriod=30
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.648899 4940 generic.go:334] "Generic (PLEG): container finished" podID="efe355d7-3ba1-451e-aebd-271c367e186c" containerID="a876ed16c09326ce9b2409438a1818e2d5dec88020d8fc0cfaa071b6f74bede0" exitCode=0
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.648947 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" event={"ID":"efe355d7-3ba1-451e-aebd-271c367e186c","Type":"ContainerDied","Data":"a876ed16c09326ce9b2409438a1818e2d5dec88020d8fc0cfaa071b6f74bede0"}
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.793388 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt"
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.887718 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/efe355d7-3ba1-451e-aebd-271c367e186c-ca-trust-extracted\") pod \"efe355d7-3ba1-451e-aebd-271c367e186c\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") "
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.887991 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"efe355d7-3ba1-451e-aebd-271c367e186c\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") "
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.888021 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/efe355d7-3ba1-451e-aebd-271c367e186c-installation-pull-secrets\") pod \"efe355d7-3ba1-451e-aebd-271c367e186c\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") "
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.888094 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwhkt\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-kube-api-access-pwhkt\") pod \"efe355d7-3ba1-451e-aebd-271c367e186c\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") "
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.888280 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-trusted-ca\") pod \"efe355d7-3ba1-451e-aebd-271c367e186c\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") "
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.888942 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "efe355d7-3ba1-451e-aebd-271c367e186c" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.889030 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-registry-tls\") pod \"efe355d7-3ba1-451e-aebd-271c367e186c\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") "
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.889382 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-registry-certificates\") pod \"efe355d7-3ba1-451e-aebd-271c367e186c\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") "
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.889493 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-bound-sa-token\") pod \"efe355d7-3ba1-451e-aebd-271c367e186c\" (UID: \"efe355d7-3ba1-451e-aebd-271c367e186c\") "
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.889753 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.890379 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "efe355d7-3ba1-451e-aebd-271c367e186c" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.893966 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "efe355d7-3ba1-451e-aebd-271c367e186c" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.894414 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-kube-api-access-pwhkt" (OuterVolumeSpecName: "kube-api-access-pwhkt") pod "efe355d7-3ba1-451e-aebd-271c367e186c" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c"). InnerVolumeSpecName "kube-api-access-pwhkt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.895221 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "efe355d7-3ba1-451e-aebd-271c367e186c" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c"). InnerVolumeSpecName "registry-tls".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.896336 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efe355d7-3ba1-451e-aebd-271c367e186c-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "efe355d7-3ba1-451e-aebd-271c367e186c" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.901662 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "efe355d7-3ba1-451e-aebd-271c367e186c" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.908482 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efe355d7-3ba1-451e-aebd-271c367e186c-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "efe355d7-3ba1-451e-aebd-271c367e186c" (UID: "efe355d7-3ba1-451e-aebd-271c367e186c"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.990822 4940 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/efe355d7-3ba1-451e-aebd-271c367e186c-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.990862 4940 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/efe355d7-3ba1-451e-aebd-271c367e186c-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.990877 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwhkt\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-kube-api-access-pwhkt\") on node \"crc\" DevicePath \"\"" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.990889 4940 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.990900 4940 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/efe355d7-3ba1-451e-aebd-271c367e186c-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 07:02:40 crc kubenswrapper[4940]: I1126 07:02:40.990910 4940 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/efe355d7-3ba1-451e-aebd-271c367e186c-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 07:02:41 crc kubenswrapper[4940]: I1126 07:02:41.659994 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" event={"ID":"efe355d7-3ba1-451e-aebd-271c367e186c","Type":"ContainerDied","Data":"2e58b7e9eb9c2e01c234e4bdac8839b7d91e1215c43eccf6ee91b0a1cf48c07f"} Nov 26 07:02:41 crc kubenswrapper[4940]: I1126 07:02:41.660079 4940 scope.go:117] "RemoveContainer" 
containerID="a876ed16c09326ce9b2409438a1818e2d5dec88020d8fc0cfaa071b6f74bede0" Nov 26 07:02:41 crc kubenswrapper[4940]: I1126 07:02:41.660215 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sv7gt" Nov 26 07:02:41 crc kubenswrapper[4940]: I1126 07:02:41.677769 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sv7gt"] Nov 26 07:02:41 crc kubenswrapper[4940]: I1126 07:02:41.680937 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sv7gt"] Nov 26 07:02:43 crc kubenswrapper[4940]: I1126 07:02:43.172566 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efe355d7-3ba1-451e-aebd-271c367e186c" path="/var/lib/kubelet/pods/efe355d7-3ba1-451e-aebd-271c367e186c/volumes" Nov 26 07:03:51 crc kubenswrapper[4940]: I1126 07:03:51.728219 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:03:51 crc kubenswrapper[4940]: I1126 07:03:51.728830 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:04:21 crc kubenswrapper[4940]: I1126 07:04:21.728887 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:04:21 crc kubenswrapper[4940]: I1126 07:04:21.729542 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:04:51 crc kubenswrapper[4940]: I1126 07:04:51.728654 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:04:51 crc kubenswrapper[4940]: I1126 07:04:51.730318 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:04:51 crc kubenswrapper[4940]: I1126 07:04:51.730454 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:04:51 crc kubenswrapper[4940]: I1126 07:04:51.731238 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"3eee2744c5ef6eeac0756de5055d0728c038b9c6865c21fd3c9a2ecfca6cd031"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:04:51 crc kubenswrapper[4940]: I1126 07:04:51.731422 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://3eee2744c5ef6eeac0756de5055d0728c038b9c6865c21fd3c9a2ecfca6cd031" gracePeriod=600 Nov 26 07:04:52 crc kubenswrapper[4940]: I1126 07:04:52.485453 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="3eee2744c5ef6eeac0756de5055d0728c038b9c6865c21fd3c9a2ecfca6cd031" exitCode=0 Nov 26 07:04:52 crc kubenswrapper[4940]: I1126 07:04:52.485527 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"3eee2744c5ef6eeac0756de5055d0728c038b9c6865c21fd3c9a2ecfca6cd031"} Nov 26 07:04:52 crc kubenswrapper[4940]: I1126 07:04:52.486086 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"4831b08e520666c63a46d4332bc996f659d9f6b347ced8e4ef4908839bbfa56c"} Nov 26 07:04:52 crc kubenswrapper[4940]: I1126 07:04:52.486190 4940 scope.go:117] "RemoveContainer" containerID="b3655cde675d2cf9e1ba94c8eb1aa758ad65396882a59af55936e02bf229c62e" Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.178763 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ft8qp"] Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.179544 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" podUID="02eb3fbf-9bcf-4097-80da-07430ae0cceb" containerName="controller-manager" containerID="cri-o://be7991bc3e3f1529562d4326c7356a7a1859958962ff3e3f6d2325aa7766ddec" gracePeriod=30 Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.296508 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"] Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.296740 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" podUID="e93e70f9-30a0-4254-8cc9-2988b9028297" containerName="route-controller-manager" containerID="cri-o://7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506" gracePeriod=30 Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.306540 4940 generic.go:334] "Generic (PLEG): container finished" podID="02eb3fbf-9bcf-4097-80da-07430ae0cceb" containerID="be7991bc3e3f1529562d4326c7356a7a1859958962ff3e3f6d2325aa7766ddec" exitCode=0 Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.306583 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" event={"ID":"02eb3fbf-9bcf-4097-80da-07430ae0cceb","Type":"ContainerDied","Data":"be7991bc3e3f1529562d4326c7356a7a1859958962ff3e3f6d2325aa7766ddec"} 
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.509174 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp"
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.605297 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02eb3fbf-9bcf-4097-80da-07430ae0cceb-serving-cert\") pod \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.605337 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-client-ca\") pod \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.605359 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-config\") pod \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.605383 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69svt\" (UniqueName: \"kubernetes.io/projected/02eb3fbf-9bcf-4097-80da-07430ae0cceb-kube-api-access-69svt\") pod \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.605410 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-proxy-ca-bundles\") pod \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\" (UID: \"02eb3fbf-9bcf-4097-80da-07430ae0cceb\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.606146 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "02eb3fbf-9bcf-4097-80da-07430ae0cceb" (UID: "02eb3fbf-9bcf-4097-80da-07430ae0cceb"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.606158 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-client-ca" (OuterVolumeSpecName: "client-ca") pod "02eb3fbf-9bcf-4097-80da-07430ae0cceb" (UID: "02eb3fbf-9bcf-4097-80da-07430ae0cceb"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.606233 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-config" (OuterVolumeSpecName: "config") pod "02eb3fbf-9bcf-4097-80da-07430ae0cceb" (UID: "02eb3fbf-9bcf-4097-80da-07430ae0cceb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.617937 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02eb3fbf-9bcf-4097-80da-07430ae0cceb-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "02eb3fbf-9bcf-4097-80da-07430ae0cceb" (UID: "02eb3fbf-9bcf-4097-80da-07430ae0cceb"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.617932 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02eb3fbf-9bcf-4097-80da-07430ae0cceb-kube-api-access-69svt" (OuterVolumeSpecName: "kube-api-access-69svt") pod "02eb3fbf-9bcf-4097-80da-07430ae0cceb" (UID: "02eb3fbf-9bcf-4097-80da-07430ae0cceb"). InnerVolumeSpecName "kube-api-access-69svt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.706807 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02eb3fbf-9bcf-4097-80da-07430ae0cceb-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.706844 4940 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-client-ca\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.706853 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.706861 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69svt\" (UniqueName: \"kubernetes.io/projected/02eb3fbf-9bcf-4097-80da-07430ae0cceb-kube-api-access-69svt\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.706872 4940 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/02eb3fbf-9bcf-4097-80da-07430ae0cceb-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.762676 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.919915 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-client-ca\") pod \"e93e70f9-30a0-4254-8cc9-2988b9028297\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.920010 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e93e70f9-30a0-4254-8cc9-2988b9028297-serving-cert\") pod \"e93e70f9-30a0-4254-8cc9-2988b9028297\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.920053 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-config\") pod \"e93e70f9-30a0-4254-8cc9-2988b9028297\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.920074 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6cht\" (UniqueName: \"kubernetes.io/projected/e93e70f9-30a0-4254-8cc9-2988b9028297-kube-api-access-t6cht\") pod \"e93e70f9-30a0-4254-8cc9-2988b9028297\" (UID: \"e93e70f9-30a0-4254-8cc9-2988b9028297\") "
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.920712 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-client-ca" (OuterVolumeSpecName: "client-ca") pod "e93e70f9-30a0-4254-8cc9-2988b9028297" (UID: "e93e70f9-30a0-4254-8cc9-2988b9028297"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.920734 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-config" (OuterVolumeSpecName: "config") pod "e93e70f9-30a0-4254-8cc9-2988b9028297" (UID: "e93e70f9-30a0-4254-8cc9-2988b9028297"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.922974 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e93e70f9-30a0-4254-8cc9-2988b9028297-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e93e70f9-30a0-4254-8cc9-2988b9028297" (UID: "e93e70f9-30a0-4254-8cc9-2988b9028297"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:07:04 crc kubenswrapper[4940]: I1126 07:07:04.923073 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e93e70f9-30a0-4254-8cc9-2988b9028297-kube-api-access-t6cht" (OuterVolumeSpecName: "kube-api-access-t6cht") pod "e93e70f9-30a0-4254-8cc9-2988b9028297" (UID: "e93e70f9-30a0-4254-8cc9-2988b9028297"). InnerVolumeSpecName "kube-api-access-t6cht". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.021794 4940 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-client-ca\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.021844 4940 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e93e70f9-30a0-4254-8cc9-2988b9028297-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.021857 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6cht\" (UniqueName: \"kubernetes.io/projected/e93e70f9-30a0-4254-8cc9-2988b9028297-kube-api-access-t6cht\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.021870 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e93e70f9-30a0-4254-8cc9-2988b9028297-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.314793 4940 generic.go:334] "Generic (PLEG): container finished" podID="e93e70f9-30a0-4254-8cc9-2988b9028297" containerID="7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506" exitCode=0
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.314875 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" event={"ID":"e93e70f9-30a0-4254-8cc9-2988b9028297","Type":"ContainerDied","Data":"7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506"}
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.314892 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.314921 4940 scope.go:117] "RemoveContainer" containerID="7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.314908 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88" event={"ID":"e93e70f9-30a0-4254-8cc9-2988b9028297","Type":"ContainerDied","Data":"6b29bcdbe5e5901ae945b0f63d4517f25139d14ee62f34663e85f61fdeb28d36"}
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.316928 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp" event={"ID":"02eb3fbf-9bcf-4097-80da-07430ae0cceb","Type":"ContainerDied","Data":"8d99d28270dfeda5cf906bb4e2770b2b0711df78bd34bcecf0b6a03204e706f4"}
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.317025 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ft8qp"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.333160 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"]
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.336460 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8pf88"]
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.336703 4940 scope.go:117] "RemoveContainer" containerID="7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506"
Nov 26 07:07:05 crc kubenswrapper[4940]: E1126 07:07:05.337129 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506\": container with ID starting with 7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506 not found: ID does not exist" containerID="7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.337158 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506"} err="failed to get container status \"7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506\": rpc error: code = NotFound desc = could not find container \"7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506\": container with ID starting with 7df2a3d8b52ecdfebcb435d88c680e1c585e276330b932571750de802cca5506 not found: ID does not exist"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.337184 4940 scope.go:117] "RemoveContainer" containerID="be7991bc3e3f1529562d4326c7356a7a1859958962ff3e3f6d2325aa7766ddec"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.345939 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ft8qp"]
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.349318 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ft8qp"]
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.833396 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-57cd86bc5c-d9729"]
Nov 26 07:07:05 crc kubenswrapper[4940]: E1126 07:07:05.833778 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e93e70f9-30a0-4254-8cc9-2988b9028297" containerName="route-controller-manager"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.833808 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e93e70f9-30a0-4254-8cc9-2988b9028297" containerName="route-controller-manager"
Nov 26 07:07:05 crc kubenswrapper[4940]: E1126 07:07:05.833829 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efe355d7-3ba1-451e-aebd-271c367e186c" containerName="registry"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.833841 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="efe355d7-3ba1-451e-aebd-271c367e186c" containerName="registry"
Nov 26 07:07:05 crc kubenswrapper[4940]: E1126 07:07:05.833853 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02eb3fbf-9bcf-4097-80da-07430ae0cceb" containerName="controller-manager"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.833865 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="02eb3fbf-9bcf-4097-80da-07430ae0cceb" containerName="controller-manager"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.834067 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="efe355d7-3ba1-451e-aebd-271c367e186c" containerName="registry"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.834085 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="02eb3fbf-9bcf-4097-80da-07430ae0cceb" containerName="controller-manager"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.834101 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e93e70f9-30a0-4254-8cc9-2988b9028297" containerName="route-controller-manager"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.834699 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.837258 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"]
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.837818 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.837961 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.838047 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.838400 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.838516 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.838626 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.841615 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.842349 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.842776 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.842783 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.842797 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.843382 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.843421 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.848903 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.852966 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-57cd86bc5c-d9729"]
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.855360 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"]
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933589 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-config\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933626 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp7md\" (UniqueName: \"kubernetes.io/projected/49739d19-f75a-4cc5-8444-b375eb780403-kube-api-access-bp7md\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933665 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-proxy-ca-bundles\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933697 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/49739d19-f75a-4cc5-8444-b375eb780403-client-ca\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933711 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1fc30b6-b6da-4324-936c-46744a645fea-serving-cert\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933733 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-client-ca\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933751 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49739d19-f75a-4cc5-8444-b375eb780403-serving-cert\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933821 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwvqg\" (UniqueName: \"kubernetes.io/projected/a1fc30b6-b6da-4324-936c-46744a645fea-kube-api-access-mwvqg\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:05 crc kubenswrapper[4940]: I1126 07:07:05.933862 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49739d19-f75a-4cc5-8444-b375eb780403-config\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.034543 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/49739d19-f75a-4cc5-8444-b375eb780403-client-ca\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.034589 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1fc30b6-b6da-4324-936c-46744a645fea-serving-cert\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.034613 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-client-ca\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.034633 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49739d19-f75a-4cc5-8444-b375eb780403-serving-cert\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.034654 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwvqg\" (UniqueName: \"kubernetes.io/projected/a1fc30b6-b6da-4324-936c-46744a645fea-kube-api-access-mwvqg\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.035363 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49739d19-f75a-4cc5-8444-b375eb780403-config\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.035399 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-config\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.035415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bp7md\" (UniqueName: \"kubernetes.io/projected/49739d19-f75a-4cc5-8444-b375eb780403-kube-api-access-bp7md\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.035450 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-proxy-ca-bundles\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.035884 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/49739d19-f75a-4cc5-8444-b375eb780403-client-ca\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.036089 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-client-ca\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.036309 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-proxy-ca-bundles\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.036676 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49739d19-f75a-4cc5-8444-b375eb780403-config\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.036974 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1fc30b6-b6da-4324-936c-46744a645fea-config\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.039796 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49739d19-f75a-4cc5-8444-b375eb780403-serving-cert\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.051157 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwvqg\" (UniqueName: \"kubernetes.io/projected/a1fc30b6-b6da-4324-936c-46744a645fea-kube-api-access-mwvqg\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.052541 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1fc30b6-b6da-4324-936c-46744a645fea-serving-cert\") pod \"controller-manager-57cd86bc5c-d9729\" (UID: \"a1fc30b6-b6da-4324-936c-46744a645fea\") " pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.052808 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp7md\" (UniqueName: \"kubernetes.io/projected/49739d19-f75a-4cc5-8444-b375eb780403-kube-api-access-bp7md\") pod \"route-controller-manager-86df7fd44d-w5qqx\" (UID: \"49739d19-f75a-4cc5-8444-b375eb780403\") " pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.155577 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.169358 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.359802 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"]
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.384600 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-57cd86bc5c-d9729"]
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.452489 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lj789"]
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.454608 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="northd" containerID="cri-o://79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61" gracePeriod=30
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.454712 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="sbdb" containerID="cri-o://9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05" gracePeriod=30
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.454754 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="nbdb" containerID="cri-o://a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca" gracePeriod=30
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.454815 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kube-rbac-proxy-node" containerID="cri-o://5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4" gracePeriod=30
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.454853 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce" gracePeriod=30
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.454891 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovn-acl-logging" containerID="cri-o://8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec" gracePeriod=30
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.455129 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovn-controller" containerID="cri-o://ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688" gracePeriod=30
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.505347 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" containerID="cri-o://3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18" gracePeriod=30
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.850850 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/3.log"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.852888 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovn-acl-logging/0.log"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.853305 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovn-controller/0.log"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.853705 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946588 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-etc-openvswitch\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946640 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-systemd\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946674 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-config\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946711 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946724 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946752 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-script-lib\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946785 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-node-log\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946805 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946817 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovn-node-metrics-cert\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946879 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-bin\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946906 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-slash\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946931 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-env-overrides\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.946983 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-ovn\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947009 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-netns\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947059 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-ovn-kubernetes\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947085 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-systemd-units\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947104 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-openvswitch\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947130 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-kubelet\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947150 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-netd\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947183 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-log-socket\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947207 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-var-lib-openvswitch\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947232 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947236 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmgxv\" (UniqueName: \"kubernetes.io/projected/69c1adb3-d9e7-4302-89d2-60745597f2cc-kube-api-access-lmgxv\") pod \"69c1adb3-d9e7-4302-89d2-60745597f2cc\" (UID: \"69c1adb3-d9e7-4302-89d2-60745597f2cc\") "
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947515 4940 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947539 4940 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947554 4940 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947586 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947614 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947635 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-slash" (OuterVolumeSpecName: "host-slash") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947696 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947741 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "host-run-netns".
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947770 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-node-log" (OuterVolumeSpecName: "node-log") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947808 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947832 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947855 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947879 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-log-socket" (OuterVolumeSpecName: "log-socket") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947901 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947913 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.947925 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.948067 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957090 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mk75l"] Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957578 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kubecfg-setup" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957595 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kubecfg-setup" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957603 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovn-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957610 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovn-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957619 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957626 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957632 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957638 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957646 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="sbdb" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957653 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="sbdb" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957660 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957666 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957676 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovn-acl-logging" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957682 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovn-acl-logging" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957690 4940 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957696 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957703 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kube-rbac-proxy-node" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957709 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kube-rbac-proxy-node" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957715 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="northd" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957722 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="northd" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.957730 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="nbdb" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957735 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="nbdb" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957820 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957829 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="sbdb" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957837 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957843 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957849 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovn-acl-logging" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957856 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="northd" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957863 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957872 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="nbdb" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957880 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="kube-rbac-proxy-node" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957886 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovn-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 
07:07:06.957978 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.957985 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.975179 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.975219 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: E1126 07:07:06.975382 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.975392 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerName="ovnkube-controller" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.975648 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.976791 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.978497 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69c1adb3-d9e7-4302-89d2-60745597f2cc-kube-api-access-lmgxv" (OuterVolumeSpecName: "kube-api-access-lmgxv") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "kube-api-access-lmgxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:06 crc kubenswrapper[4940]: I1126 07:07:06.979460 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "69c1adb3-d9e7-4302-89d2-60745597f2cc" (UID: "69c1adb3-d9e7-4302-89d2-60745597f2cc"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049118 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovn-node-metrics-cert\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049189 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-etc-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049220 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovnkube-config\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049267 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-systemd-units\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049371 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-log-socket\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049424 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovnkube-script-lib\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049454 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx5c4\" (UniqueName: \"kubernetes.io/projected/3cd87911-48a9-47b3-972b-10e5fe63f06f-kube-api-access-fx5c4\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049471 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-systemd\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049499 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049560 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-ovn\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049591 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-cni-netd\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049616 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049641 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-kubelet\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049678 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-cni-bin\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049702 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-run-netns\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049724 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-var-lib-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049804 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-env-overrides\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049867 4940 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-slash\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049891 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-node-log\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.049916 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-run-ovn-kubernetes\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050097 4940 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050118 4940 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-node-log\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050133 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/69c1adb3-d9e7-4302-89d2-60745597f2cc-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050148 4940 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050160 4940 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-slash\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050171 4940 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/69c1adb3-d9e7-4302-89d2-60745597f2cc-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050181 4940 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050194 4940 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050208 4940 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 
07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050220 4940 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050231 4940 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050243 4940 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050253 4940 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050265 4940 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-log-socket\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050276 4940 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050287 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmgxv\" (UniqueName: \"kubernetes.io/projected/69c1adb3-d9e7-4302-89d2-60745597f2cc-kube-api-access-lmgxv\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.050298 4940 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/69c1adb3-d9e7-4302-89d2-60745597f2cc-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151027 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-log-socket\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151100 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovnkube-script-lib\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151129 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx5c4\" (UniqueName: \"kubernetes.io/projected/3cd87911-48a9-47b3-972b-10e5fe63f06f-kube-api-access-fx5c4\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151148 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-log-socket\") pod 
\"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151148 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-systemd\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151255 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151203 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-systemd\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151299 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-ovn\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151316 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-cni-netd\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151332 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151330 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151361 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-run-ovn\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151374 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-kubelet\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151356 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-kubelet\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151396 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-cni-netd\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151409 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151426 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-cni-bin\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151448 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-run-netns\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151463 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-var-lib-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151482 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-env-overrides\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151500 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-cni-bin\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151516 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-var-lib-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc 
kubenswrapper[4940]: I1126 07:07:07.151503 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-run-netns\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151506 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-slash\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151534 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-slash\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151587 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-node-log\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151617 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-run-ovn-kubernetes\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151663 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovn-node-metrics-cert\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151670 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-node-log\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151691 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-etc-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151710 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-etc-openvswitch\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151718 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovnkube-config\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151747 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-systemd-units\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151688 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-host-run-ovn-kubernetes\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.151823 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3cd87911-48a9-47b3-972b-10e5fe63f06f-systemd-units\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.152061 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovnkube-script-lib\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.152140 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-env-overrides\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.152224 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovnkube-config\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.156433 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3cd87911-48a9-47b3-972b-10e5fe63f06f-ovn-node-metrics-cert\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.171493 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02eb3fbf-9bcf-4097-80da-07430ae0cceb" path="/var/lib/kubelet/pods/02eb3fbf-9bcf-4097-80da-07430ae0cceb/volumes" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.172328 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e93e70f9-30a0-4254-8cc9-2988b9028297" path="/var/lib/kubelet/pods/e93e70f9-30a0-4254-8cc9-2988b9028297/volumes" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.174656 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-fx5c4\" (UniqueName: \"kubernetes.io/projected/3cd87911-48a9-47b3-972b-10e5fe63f06f-kube-api-access-fx5c4\") pod \"ovnkube-node-mk75l\" (UID: \"3cd87911-48a9-47b3-972b-10e5fe63f06f\") " pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.291728 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.341508 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx" event={"ID":"49739d19-f75a-4cc5-8444-b375eb780403","Type":"ContainerStarted","Data":"fa6795cdd548ae32ef70fa0cc6acc22bf3e190f34a544f17dbfc7f5ca0db7c5c"} Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.341578 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx" event={"ID":"49739d19-f75a-4cc5-8444-b375eb780403","Type":"ContainerStarted","Data":"bda472d3424c000a1a5266a989d715fb16b287c3776fc0e25f425374fd53510c"} Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.342204 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.343706 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gnvm5_c9ec0fa1-713d-4824-9a3a-a20eff8c65e0/kube-multus/1.log" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.344413 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gnvm5_c9ec0fa1-713d-4824-9a3a-a20eff8c65e0/kube-multus/0.log" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.344459 4940 generic.go:334] "Generic (PLEG): container finished" podID="c9ec0fa1-713d-4824-9a3a-a20eff8c65e0" containerID="9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e" exitCode=2 Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.344520 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gnvm5" event={"ID":"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0","Type":"ContainerDied","Data":"9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e"} Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.344586 4940 scope.go:117] "RemoveContainer" containerID="4efd6d6939e39f7c7765a24d86f67a632d00ca5251b5310cabd3fc955e7bc8fb" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.344983 4940 scope.go:117] "RemoveContainer" containerID="9f11edcea432aa25e467ea1729d22016b381469d2cbe5b6b9ef72529fffb9e1e" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.345656 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"321936dfad41efeb13c7cafbf44d33836fa31fadb81e6d86293b8e64ad62618d"} Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.348274 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovnkube-controller/3.log" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.352793 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovn-acl-logging/0.log" Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.353533 
4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lj789_69c1adb3-d9e7-4302-89d2-60745597f2cc/ovn-controller/0.log"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.354754 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355426 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18" exitCode=0
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355461 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05" exitCode=0
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355472 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca" exitCode=0
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355483 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61" exitCode=0
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355495 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce" exitCode=0
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355506 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4" exitCode=0
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355519 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec" exitCode=143
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355530 4940 generic.go:334] "Generic (PLEG): container finished" podID="69c1adb3-d9e7-4302-89d2-60745597f2cc" containerID="ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688" exitCode=143
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355587 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355611 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355627 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355641 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355654 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355666 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355679 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355692 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355700 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355709 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355717 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355724 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355731 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355738 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355745 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355751 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355761 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355771 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355779 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355786 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355794 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355801 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355808 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355815 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355822 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355829 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355837 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355849 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355861 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355869 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355876 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355883 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355890 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355897 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355905 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355914 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355923 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355932 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355944 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lj789" event={"ID":"69c1adb3-d9e7-4302-89d2-60745597f2cc","Type":"ContainerDied","Data":"5545c6b07e356addc8a0191918268101fa01660694a2cb0f5f7f779ea23dcd03"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355957 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355969 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355977 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.355984 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.356018 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.356028 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.356055 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.356065 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.356074 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.356083 4940 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.356203 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lj789"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.369747 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729" event={"ID":"a1fc30b6-b6da-4324-936c-46744a645fea","Type":"ContainerStarted","Data":"480f8573743c9b71d0ed3edaa44b377ac11b37dcaa61957c9d559f65b70ce361"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.369806 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729" event={"ID":"a1fc30b6-b6da-4324-936c-46744a645fea","Type":"ContainerStarted","Data":"424742980ee58205789bb5d4fe57b2363431ece4d4c042729558e702a08c46b9"}
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.371745 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.372724 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-86df7fd44d-w5qqx" podStartSLOduration=3.372709356 podStartE2EDuration="3.372709356s" podCreationTimestamp="2025-11-26 07:07:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:07.368563146 +0000 UTC m=+728.888704785" watchObservedRunningTime="2025-11-26 07:07:07.372709356 +0000 UTC m=+728.892850985"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.383648 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.433546 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-57cd86bc5c-d9729" podStartSLOduration=3.433526348 podStartE2EDuration="3.433526348s" podCreationTimestamp="2025-11-26 07:07:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:07.432222896 +0000 UTC m=+728.952364525" watchObservedRunningTime="2025-11-26 07:07:07.433526348 +0000 UTC m=+728.953667977"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.468470 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lj789"]
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.472745 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lj789"]
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.472900 4940 scope.go:117] "RemoveContainer" containerID="3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.493689 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.509920 4940 scope.go:117] "RemoveContainer" containerID="9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.526311 4940 scope.go:117] "RemoveContainer" containerID="a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.536868 4940 scope.go:117] "RemoveContainer" containerID="79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.556087 4940 scope.go:117] "RemoveContainer" containerID="e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.582413 4940 scope.go:117] "RemoveContainer" containerID="5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.601532 4940 scope.go:117] "RemoveContainer" containerID="8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.655186 4940 scope.go:117] "RemoveContainer" containerID="ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.673292 4940 scope.go:117] "RemoveContainer" containerID="d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.699671 4940 scope.go:117] "RemoveContainer" containerID="3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.700704 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": container with ID starting with 3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18 not found: ID does not exist" containerID="3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.700743 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"} err="failed to get container status \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": rpc error: code = NotFound desc = could not find container \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": container with ID starting with 3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.700770 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.701588 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": container with ID starting with a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a not found: ID does not exist" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.701617 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"} err="failed to get container status \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": rpc error: code = NotFound desc = could not find container \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": container with ID starting with a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.701636 4940 scope.go:117] "RemoveContainer" containerID="9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.702500 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": container with ID starting with 9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05 not found: ID does not exist" containerID="9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.702531 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"} err="failed to get container status \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": rpc error: code = NotFound desc = could not find container \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": container with ID starting with 9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.702551 4940 scope.go:117] "RemoveContainer" containerID="a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.702936 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": container with ID starting with a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca not found: ID does not exist" containerID="a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.702962 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"} err="failed to get container status \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": rpc error: code = NotFound desc = could not find container \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": container with ID starting with a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.702980 4940 scope.go:117] "RemoveContainer" containerID="79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.703301 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": container with ID starting with 79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61 not found: ID does not exist" containerID="79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.703330 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"} err="failed to get container status \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": rpc error: code = NotFound desc = could not find container \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": container with ID starting with 79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.703349 4940 scope.go:117] "RemoveContainer" containerID="e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.703701 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": container with ID starting with e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce not found: ID does not exist" containerID="e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.703728 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"} err="failed to get container status \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": rpc error: code = NotFound desc = could not find container \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": container with ID starting with e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.703746 4940 scope.go:117] "RemoveContainer" containerID="5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.704098 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": container with ID starting with 5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4 not found: ID does not exist" containerID="5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.704138 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"} err="failed to get container status \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": rpc error: code = NotFound desc = could not find container \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": container with ID starting with 5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.704164 4940 scope.go:117] "RemoveContainer" containerID="8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.705246 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": container with ID starting with 8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec not found: ID does not exist" containerID="8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.705290 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"} err="failed to get container status \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": rpc error: code = NotFound desc = could not find container \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": container with ID starting with 8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.705314 4940 scope.go:117] "RemoveContainer" containerID="ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.705789 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": container with ID starting with ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688 not found: ID does not exist" containerID="ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.705811 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"} err="failed to get container status \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": rpc error: code = NotFound desc = could not find container \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": container with ID starting with ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.705830 4940 scope.go:117] "RemoveContainer" containerID="d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"
Nov 26 07:07:07 crc kubenswrapper[4940]: E1126 07:07:07.706433 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": container with ID starting with d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71 not found: ID does not exist" containerID="d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.706465 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"} err="failed to get container status \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": rpc error: code = NotFound desc = could not find container \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": container with ID starting with d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.706485 4940 scope.go:117] "RemoveContainer" containerID="3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.706781 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"} err="failed to get container status \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": rpc error: code = NotFound desc = could not find container \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": container with ID starting with 3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.706800 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.707060 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"} err="failed to get container status \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": rpc error: code = NotFound desc = could not find container \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": container with ID starting with a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.707076 4940 scope.go:117] "RemoveContainer" containerID="9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.707964 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"} err="failed to get container status \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": rpc error: code = NotFound desc = could not find container \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": container with ID starting with 9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.707983 4940 scope.go:117] "RemoveContainer" containerID="a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.708300 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"} err="failed to get container status \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": rpc error: code = NotFound desc = could not find container \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": container with ID starting with a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.708346 4940 scope.go:117] "RemoveContainer" containerID="79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.708824 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"} err="failed to get container status \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": rpc error: code = NotFound desc = could not find container \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": container with ID starting with 79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.708896 4940 scope.go:117] "RemoveContainer" containerID="e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.709245 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"} err="failed to get container status \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": rpc error: code = NotFound desc = could not find container \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": container with ID starting with e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.709274 4940 scope.go:117] "RemoveContainer" containerID="5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.709518 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"} err="failed to get container status \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": rpc error: code = NotFound desc = could not find container \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": container with ID starting with 5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.709537 4940 scope.go:117] "RemoveContainer" containerID="8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.709786 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"} err="failed to get container status \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": rpc error: code = NotFound desc = could not find container \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": container with ID starting with 8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.709821 4940 scope.go:117] "RemoveContainer" containerID="ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.710189 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"} err="failed to get container status \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": rpc error: code = NotFound desc = could not find container \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": container with ID starting with ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.710226 4940 scope.go:117] "RemoveContainer" containerID="d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.710461 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"} err="failed to get container status \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": rpc error: code = NotFound desc = could not find container \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": container with ID starting with d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.710499 4940 scope.go:117] "RemoveContainer" containerID="3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.710755 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"} err="failed to get container status \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": rpc error: code = NotFound desc = could not find container \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": container with ID starting with 3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.710784 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.711013 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"} err="failed to get container status \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": rpc error: code = NotFound desc = could not find container \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": container with ID starting with a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.711050 4940 scope.go:117] "RemoveContainer" containerID="9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.711277 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"} err="failed to get container status \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": rpc error: code = NotFound desc = could not find container \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": container with ID starting with 9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.711329 4940 scope.go:117] "RemoveContainer" containerID="a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.711691 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"} err="failed to get container status \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": rpc error: code = NotFound desc = could not find container \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": container with ID starting with a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.711707 4940 scope.go:117] "RemoveContainer" containerID="79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.712018 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"} err="failed to get container status \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": rpc error: code = NotFound desc = could not find container \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": container with ID starting with 79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.712048 4940 scope.go:117] "RemoveContainer" containerID="e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.712319 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"} err="failed to get container status \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": rpc error: code = NotFound desc = could not find container \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": container with ID starting with e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.712336 4940 scope.go:117] "RemoveContainer" containerID="5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.712582 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"} err="failed to get container status \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": rpc error: code = NotFound desc = could not find container \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": container with ID starting with 5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.712599 4940 scope.go:117] "RemoveContainer" containerID="8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.712797 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"} err="failed to get container status \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": rpc error: code = NotFound desc = could not find container \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": container with ID starting with 8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.712812 4940 scope.go:117] "RemoveContainer" containerID="ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.713023 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"} err="failed to get container status \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": rpc error: code = NotFound desc = could not find container \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": container with ID starting with ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.713077 4940 scope.go:117] "RemoveContainer" containerID="d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.713331 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"} err="failed to get container status \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": rpc error: code = NotFound desc = could not find container \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": container with ID starting with d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.713347 4940 scope.go:117] "RemoveContainer" containerID="3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.713660 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18"} err="failed to get container status \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": rpc error: code = NotFound desc = could not find container \"3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18\": container with ID starting with 3cd68e41b96cbce42aa3db4a6129e6be55f04fc482c127f087dd090c23575e18 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.713680 4940 scope.go:117] "RemoveContainer" containerID="a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.714137 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a"} err="failed to get container status \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": rpc error: code = NotFound desc = could not find container \"a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a\": container with ID starting with a8902f69ed070d02559afe9849d673a29415937ad56fd3ec8890d636c36f714a not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.714165 4940 scope.go:117] "RemoveContainer" containerID="9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.714430 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05"} err="failed to get container status \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": rpc error: code = NotFound desc = could not find container \"9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05\": container with ID starting with 9f40e4bd339e2b9e63dbfe29221ccc95cfeda8bd377bbdee0ad2824514aabc05 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.714455 4940 scope.go:117] "RemoveContainer" containerID="a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.714752 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca"} err="failed to get container status \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": rpc error: code = NotFound desc = could not find container \"a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca\": container with ID starting with a97d502919c225a59dc8d79213c5fbf19c71ea5e17cf9cba49ced02f17931dca not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.714774 4940 scope.go:117] "RemoveContainer" containerID="79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.715124 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61"} err="failed to get container status \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": rpc error: code = NotFound desc = could not find container \"79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61\": container with ID starting with 79de2e70783cffba7439703fb12553575e4461ab1e1e079696104ef7c2818c61 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.715142 4940 scope.go:117] "RemoveContainer" containerID="e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.715490 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce"} err="failed to get container status \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": rpc error: code = NotFound desc = could not find container \"e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce\": container with ID starting with e74f631b383f2c9399700741c71849f93ae9c1f2ef6b7b37ee1035d998b7bbce not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.715510 4940 scope.go:117] "RemoveContainer" containerID="5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.716973 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4"} err="failed to get container status \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": rpc error: code = NotFound desc = could not find container \"5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4\": container with ID starting with 5972640b8dadae46f89295c758e7bf10bb80ab7d12637196bd46bb2f0df77de4 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.716993 4940 scope.go:117] "RemoveContainer" containerID="8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.717710 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec"} err="failed to get container status \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": rpc error: code = NotFound desc = could not find container \"8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec\": container with ID starting with 8e9eaf5178fd3887bce235e528f4d0dbaf7d59fe35f02a8838dd30b32ff0a8ec not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.717749 4940 scope.go:117] "RemoveContainer" containerID="ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.719619 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688"} err="failed to get container status \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": rpc error: code = NotFound desc = could not find container \"ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688\": container with ID starting with ba227920c6296db2145beb2d21387bab98b89f57d3d9716928419ef739af6688 not found: ID does not exist"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.719657 4940 scope.go:117] "RemoveContainer" containerID="d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"
Nov 26 07:07:07 crc kubenswrapper[4940]: I1126 07:07:07.720759 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71"} err="failed to get container status \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": rpc error: code = NotFound desc = could not find container \"d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71\": container with ID starting with d4a2c89046fa3ba7187757948207a64a71231439f49ca6af34b32a31d9ee2b71 not found: ID does not exist"
Nov 26 07:07:08 crc kubenswrapper[4940]: I1126 07:07:08.375642 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gnvm5_c9ec0fa1-713d-4824-9a3a-a20eff8c65e0/kube-multus/1.log"
Nov 26 07:07:08 crc kubenswrapper[4940]: I1126 07:07:08.375730 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gnvm5" event={"ID":"c9ec0fa1-713d-4824-9a3a-a20eff8c65e0","Type":"ContainerStarted","Data":"769d64128c6a3c613b845c0d42630e315a6c41793647fdc29c51fea590c4011f"}
Nov 26 07:07:08 crc kubenswrapper[4940]: I1126 07:07:08.377264 4940 generic.go:334] "Generic (PLEG): container finished" podID="3cd87911-48a9-47b3-972b-10e5fe63f06f" containerID="9d60e76a473948e686d4714eec0848b252e7efbfbf99fcbe14ef8bd6267a6e4f" exitCode=0
Nov 26 07:07:08 crc kubenswrapper[4940]: I1126 07:07:08.377290 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerDied","Data":"9d60e76a473948e686d4714eec0848b252e7efbfbf99fcbe14ef8bd6267a6e4f"}
Nov 26 07:07:09 crc kubenswrapper[4940]: I1126 07:07:09.173443 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69c1adb3-d9e7-4302-89d2-60745597f2cc" path="/var/lib/kubelet/pods/69c1adb3-d9e7-4302-89d2-60745597f2cc/volumes"
Nov 26 07:07:09 crc kubenswrapper[4940]: I1126 07:07:09.389276 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"a9292c0e7c77465a846044365f0ef04041f1729fe8c22e9dfb2bcd37d1fabc17"}
Nov 26 07:07:09 crc kubenswrapper[4940]: I1126 07:07:09.389334 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"df4acae797f835cb36ffe284a58c4199eb3d760a715b84c374a789a105c00882"}
Nov 26 07:07:09 crc kubenswrapper[4940]: I1126 07:07:09.389349 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"304176fd505f3729187792a33c9e8eeb7d481795de13ec4f86b9d75d91c1e114"}
Nov 26 07:07:09 crc kubenswrapper[4940]: I1126 07:07:09.389361 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"ef3c48deee0c403d5361dd78a0deaa04cf85076eefe4996002c243bd56013bfb"}
Nov 26 07:07:09 crc kubenswrapper[4940]: I1126 07:07:09.389373 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"fd355e6ac262a8c92e024c25c8451e16a92f6464ceed72f58154956d501983a2"}
Nov 26 07:07:10 crc kubenswrapper[4940]: I1126 07:07:10.104957 4940 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 07:07:10 crc kubenswrapper[4940]: I1126 07:07:10.398579 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"0344fbd3bfe8f16aca44f227f42fcbf6ebe7f1cbdab459ee8977d4df4614085d"}
Nov 26 07:07:12 crc kubenswrapper[4940]: I1126 07:07:12.417276 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"f00af483e62e19b08199a6195f7860af3189c04c3fbb56fa88b827411587990b"}
Nov 26 07:07:15 crc kubenswrapper[4940]: I1126 07:07:15.436277 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" event={"ID":"3cd87911-48a9-47b3-972b-10e5fe63f06f","Type":"ContainerStarted","Data":"91585520a58c6e632c4935633923efcf47709d307835e25c323a2d12f0666ef1"}
Nov 26 07:07:15 crc kubenswrapper[4940]: I1126 07:07:15.436865 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l"
Nov 26 07:07:15 crc kubenswrapper[4940]: I1126 07:07:15.436883 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l"
Nov 26 07:07:15 crc kubenswrapper[4940]: I1126 07:07:15.436894 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l"
Nov 26 07:07:15 crc kubenswrapper[4940]: I1126 07:07:15.462365 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l"
Nov 26 07:07:15 crc kubenswrapper[4940]: I1126 07:07:15.465191 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l"
Nov 26 07:07:15 crc kubenswrapper[4940]: I1126 07:07:15.469949 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" podStartSLOduration=9.469934992 podStartE2EDuration="9.469934992s" podCreationTimestamp="2025-11-26 07:07:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:15.468183897 +0000 UTC m=+736.988325536" watchObservedRunningTime="2025-11-26 07:07:15.469934992 +0000 UTC m=+736.990076621"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.155908 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-xnnpc"]
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.157161 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.161207 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.161270 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.161506 4940 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-hqrk2"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.162200 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.171350 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-xnnpc"]
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.284648 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7z4b\" (UniqueName: \"kubernetes.io/projected/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-kube-api-access-g7z4b\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.284721 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-crc-storage\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.284908 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-node-mnt\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.386557 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-node-mnt\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.386702 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7z4b\" (UniqueName: \"kubernetes.io/projected/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-kube-api-access-g7z4b\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.386777 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-crc-storage\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.386876 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-node-mnt\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.388366 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-crc-storage\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.412001 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7z4b\" (UniqueName: \"kubernetes.io/projected/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-kube-api-access-g7z4b\") pod \"crc-storage-crc-xnnpc\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") " pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.481522 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.917616 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-xnnpc"]
Nov 26 07:07:18 crc kubenswrapper[4940]: W1126 07:07:18.926343 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e65a658_9d09_4b66_9e51_f2e6fd7bf84d.slice/crio-9a68c8140d80bd8f1d5c24e8a97498eb4c62c331bfcd660dd9b440ca39246409 WatchSource:0}: Error finding container 9a68c8140d80bd8f1d5c24e8a97498eb4c62c331bfcd660dd9b440ca39246409: Status 404 returned error can't find the container with id 9a68c8140d80bd8f1d5c24e8a97498eb4c62c331bfcd660dd9b440ca39246409
Nov 26 07:07:18 crc kubenswrapper[4940]: I1126 07:07:18.933365 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 07:07:19 crc kubenswrapper[4940]: I1126 07:07:19.457737 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-xnnpc" event={"ID":"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d","Type":"ContainerStarted","Data":"9a68c8140d80bd8f1d5c24e8a97498eb4c62c331bfcd660dd9b440ca39246409"}
Nov 26 07:07:21 crc kubenswrapper[4940]: I1126 07:07:21.472600 4940 generic.go:334] "Generic (PLEG): container finished" podID="8e65a658-9d09-4b66-9e51-f2e6fd7bf84d" containerID="603e87a6f1a4546633c64ac4957fae9eba89b0435e784ab8317cce2415fc6b1c" exitCode=0
Nov 26 07:07:21 crc kubenswrapper[4940]: I1126 07:07:21.472965 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-xnnpc" event={"ID":"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d","Type":"ContainerDied","Data":"603e87a6f1a4546633c64ac4957fae9eba89b0435e784ab8317cce2415fc6b1c"}
Nov 26 07:07:21 crc kubenswrapper[4940]: I1126 07:07:21.729307 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:07:21 crc kubenswrapper[4940]: I1126 07:07:21.729377 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:07:22 crc kubenswrapper[4940]: I1126 07:07:22.807616 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-xnnpc"
Nov 26 07:07:22 crc kubenswrapper[4940]: I1126 07:07:22.949795 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7z4b\" (UniqueName: \"kubernetes.io/projected/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-kube-api-access-g7z4b\") pod \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") "
Nov 26 07:07:22 crc kubenswrapper[4940]: I1126 07:07:22.949844 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-node-mnt\") pod \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") "
Nov 26 07:07:22 crc kubenswrapper[4940]: I1126 07:07:22.949872 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-crc-storage\") pod \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\" (UID: \"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d\") "
Nov 26 07:07:22 crc kubenswrapper[4940]: I1126 07:07:22.950007 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "8e65a658-9d09-4b66-9e51-f2e6fd7bf84d" (UID: "8e65a658-9d09-4b66-9e51-f2e6fd7bf84d"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:07:22 crc kubenswrapper[4940]: I1126 07:07:22.950135 4940 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-node-mnt\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:22 crc kubenswrapper[4940]: I1126 07:07:22.955338 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-kube-api-access-g7z4b" (OuterVolumeSpecName: "kube-api-access-g7z4b") pod "8e65a658-9d09-4b66-9e51-f2e6fd7bf84d" (UID: "8e65a658-9d09-4b66-9e51-f2e6fd7bf84d"). InnerVolumeSpecName "kube-api-access-g7z4b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:07:22 crc kubenswrapper[4940]: I1126 07:07:22.962839 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "8e65a658-9d09-4b66-9e51-f2e6fd7bf84d" (UID: "8e65a658-9d09-4b66-9e51-f2e6fd7bf84d"). InnerVolumeSpecName "crc-storage".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:23 crc kubenswrapper[4940]: I1126 07:07:23.051621 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7z4b\" (UniqueName: \"kubernetes.io/projected/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-kube-api-access-g7z4b\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:23 crc kubenswrapper[4940]: I1126 07:07:23.051652 4940 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:23 crc kubenswrapper[4940]: I1126 07:07:23.489861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-xnnpc" event={"ID":"8e65a658-9d09-4b66-9e51-f2e6fd7bf84d","Type":"ContainerDied","Data":"9a68c8140d80bd8f1d5c24e8a97498eb4c62c331bfcd660dd9b440ca39246409"} Nov 26 07:07:23 crc kubenswrapper[4940]: I1126 07:07:23.489977 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a68c8140d80bd8f1d5c24e8a97498eb4c62c331bfcd660dd9b440ca39246409" Nov 26 07:07:23 crc kubenswrapper[4940]: I1126 07:07:23.490011 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-xnnpc" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.680281 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm"] Nov 26 07:07:29 crc kubenswrapper[4940]: E1126 07:07:29.681094 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e65a658-9d09-4b66-9e51-f2e6fd7bf84d" containerName="storage" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.681109 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e65a658-9d09-4b66-9e51-f2e6fd7bf84d" containerName="storage" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.681229 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e65a658-9d09-4b66-9e51-f2e6fd7bf84d" containerName="storage" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.682002 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.683724 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.689261 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm"] Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.835817 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.835919 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjxmz\" (UniqueName: \"kubernetes.io/projected/d3a179ca-909c-4b27-a176-e91d5d64399b-kube-api-access-hjxmz\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.836451 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.938387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.938500 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjxmz\" (UniqueName: \"kubernetes.io/projected/d3a179ca-909c-4b27-a176-e91d5d64399b-kube-api-access-hjxmz\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.938580 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.939258 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.939302 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:29 crc kubenswrapper[4940]: I1126 07:07:29.964837 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjxmz\" (UniqueName: \"kubernetes.io/projected/d3a179ca-909c-4b27-a176-e91d5d64399b-kube-api-access-hjxmz\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:30 crc kubenswrapper[4940]: I1126 07:07:30.003021 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:30 crc kubenswrapper[4940]: I1126 07:07:30.454954 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm"] Nov 26 07:07:30 crc kubenswrapper[4940]: I1126 07:07:30.525845 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" event={"ID":"d3a179ca-909c-4b27-a176-e91d5d64399b","Type":"ContainerStarted","Data":"855b1c44018a23759e9d0fa68f48fd9d3070caa27f0a83c64cdb32f25338d779"} Nov 26 07:07:31 crc kubenswrapper[4940]: I1126 07:07:31.532319 4940 generic.go:334] "Generic (PLEG): container finished" podID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerID="dc3d7f278f6c53033f302e62665f3349ed9cd7316f372fb516fadc14047de74e" exitCode=0 Nov 26 07:07:31 crc kubenswrapper[4940]: I1126 07:07:31.532396 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" event={"ID":"d3a179ca-909c-4b27-a176-e91d5d64399b","Type":"ContainerDied","Data":"dc3d7f278f6c53033f302e62665f3349ed9cd7316f372fb516fadc14047de74e"} Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.036155 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pqx67"] Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.038371 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.052953 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqx67"] Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.164709 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-utilities\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.164780 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n86lx\" (UniqueName: \"kubernetes.io/projected/3dac3e04-ae30-4b07-9f0d-6915d7d50205-kube-api-access-n86lx\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.164813 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-catalog-content\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.266417 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n86lx\" (UniqueName: \"kubernetes.io/projected/3dac3e04-ae30-4b07-9f0d-6915d7d50205-kube-api-access-n86lx\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.266496 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-catalog-content\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.266577 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-utilities\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.267091 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-utilities\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.267117 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-catalog-content\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.287875 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-n86lx\" (UniqueName: \"kubernetes.io/projected/3dac3e04-ae30-4b07-9f0d-6915d7d50205-kube-api-access-n86lx\") pod \"redhat-operators-pqx67\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.363603 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:32 crc kubenswrapper[4940]: I1126 07:07:32.792025 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqx67"] Nov 26 07:07:33 crc kubenswrapper[4940]: I1126 07:07:33.547547 4940 generic.go:334] "Generic (PLEG): container finished" podID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerID="50610aeab0be96c69af131b9c205c9d2ff0896e7cc087726ec75bf9aa61f227f" exitCode=0 Nov 26 07:07:33 crc kubenswrapper[4940]: I1126 07:07:33.547643 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqx67" event={"ID":"3dac3e04-ae30-4b07-9f0d-6915d7d50205","Type":"ContainerDied","Data":"50610aeab0be96c69af131b9c205c9d2ff0896e7cc087726ec75bf9aa61f227f"} Nov 26 07:07:33 crc kubenswrapper[4940]: I1126 07:07:33.547949 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqx67" event={"ID":"3dac3e04-ae30-4b07-9f0d-6915d7d50205","Type":"ContainerStarted","Data":"5d3472a61a5cdbab7a819224a43070d0ff2bc1f43e17cdc71cec3e5411cbf104"} Nov 26 07:07:34 crc kubenswrapper[4940]: I1126 07:07:34.559142 4940 generic.go:334] "Generic (PLEG): container finished" podID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerID="9b4eb30fd73f8d6cd00fde412b22adc77c07b9e47d6ad7188850e9cbdbc588de" exitCode=0 Nov 26 07:07:34 crc kubenswrapper[4940]: I1126 07:07:34.559259 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" event={"ID":"d3a179ca-909c-4b27-a176-e91d5d64399b","Type":"ContainerDied","Data":"9b4eb30fd73f8d6cd00fde412b22adc77c07b9e47d6ad7188850e9cbdbc588de"} Nov 26 07:07:35 crc kubenswrapper[4940]: I1126 07:07:35.570321 4940 generic.go:334] "Generic (PLEG): container finished" podID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerID="b55e43f7d50647942604abd7a406341748f4a27f46d5845e0b0c1e075220acfe" exitCode=0 Nov 26 07:07:35 crc kubenswrapper[4940]: I1126 07:07:35.570364 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" event={"ID":"d3a179ca-909c-4b27-a176-e91d5d64399b","Type":"ContainerDied","Data":"b55e43f7d50647942604abd7a406341748f4a27f46d5845e0b0c1e075220acfe"} Nov 26 07:07:35 crc kubenswrapper[4940]: I1126 07:07:35.573458 4940 generic.go:334] "Generic (PLEG): container finished" podID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerID="fddc21a877d6cbb2764b348dd98715b8894158c886f5211ccbd05c3d9a69ff45" exitCode=0 Nov 26 07:07:35 crc kubenswrapper[4940]: I1126 07:07:35.573492 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqx67" event={"ID":"3dac3e04-ae30-4b07-9f0d-6915d7d50205","Type":"ContainerDied","Data":"fddc21a877d6cbb2764b348dd98715b8894158c886f5211ccbd05c3d9a69ff45"} Nov 26 07:07:36 crc kubenswrapper[4940]: I1126 07:07:36.582861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqx67" 
event={"ID":"3dac3e04-ae30-4b07-9f0d-6915d7d50205","Type":"ContainerStarted","Data":"b75f0dfbbf151ea565006944044f04b97d6ff265af7228336945407e42ae3f00"} Nov 26 07:07:36 crc kubenswrapper[4940]: I1126 07:07:36.617177 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pqx67" podStartSLOduration=1.958025739 podStartE2EDuration="4.617161419s" podCreationTimestamp="2025-11-26 07:07:32 +0000 UTC" firstStartedPulling="2025-11-26 07:07:33.549369593 +0000 UTC m=+755.069511212" lastFinishedPulling="2025-11-26 07:07:36.208505233 +0000 UTC m=+757.728646892" observedRunningTime="2025-11-26 07:07:36.61433589 +0000 UTC m=+758.134477519" watchObservedRunningTime="2025-11-26 07:07:36.617161419 +0000 UTC m=+758.137303038" Nov 26 07:07:36 crc kubenswrapper[4940]: I1126 07:07:36.927855 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.028152 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-bundle\") pod \"d3a179ca-909c-4b27-a176-e91d5d64399b\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.028237 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjxmz\" (UniqueName: \"kubernetes.io/projected/d3a179ca-909c-4b27-a176-e91d5d64399b-kube-api-access-hjxmz\") pod \"d3a179ca-909c-4b27-a176-e91d5d64399b\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.028378 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-util\") pod \"d3a179ca-909c-4b27-a176-e91d5d64399b\" (UID: \"d3a179ca-909c-4b27-a176-e91d5d64399b\") " Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.029193 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-bundle" (OuterVolumeSpecName: "bundle") pod "d3a179ca-909c-4b27-a176-e91d5d64399b" (UID: "d3a179ca-909c-4b27-a176-e91d5d64399b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.035634 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3a179ca-909c-4b27-a176-e91d5d64399b-kube-api-access-hjxmz" (OuterVolumeSpecName: "kube-api-access-hjxmz") pod "d3a179ca-909c-4b27-a176-e91d5d64399b" (UID: "d3a179ca-909c-4b27-a176-e91d5d64399b"). InnerVolumeSpecName "kube-api-access-hjxmz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.043441 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-util" (OuterVolumeSpecName: "util") pod "d3a179ca-909c-4b27-a176-e91d5d64399b" (UID: "d3a179ca-909c-4b27-a176-e91d5d64399b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.130315 4940 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.130358 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjxmz\" (UniqueName: \"kubernetes.io/projected/d3a179ca-909c-4b27-a176-e91d5d64399b-kube-api-access-hjxmz\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.130375 4940 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d3a179ca-909c-4b27-a176-e91d5d64399b-util\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.318234 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mk75l" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.590842 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" event={"ID":"d3a179ca-909c-4b27-a176-e91d5d64399b","Type":"ContainerDied","Data":"855b1c44018a23759e9d0fa68f48fd9d3070caa27f0a83c64cdb32f25338d779"} Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.590881 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="855b1c44018a23759e9d0fa68f48fd9d3070caa27f0a83c64cdb32f25338d779" Nov 26 07:07:37 crc kubenswrapper[4940]: I1126 07:07:37.590897 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.147836 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5fsff"] Nov 26 07:07:40 crc kubenswrapper[4940]: E1126 07:07:40.148347 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerName="pull" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.148364 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerName="pull" Nov 26 07:07:40 crc kubenswrapper[4940]: E1126 07:07:40.148383 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerName="util" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.148391 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerName="util" Nov 26 07:07:40 crc kubenswrapper[4940]: E1126 07:07:40.148406 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerName="extract" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.148417 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerName="extract" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.148528 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3a179ca-909c-4b27-a176-e91d5d64399b" containerName="extract" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.148958 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fsff" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.150802 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.151198 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-kqd9k" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.151453 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.156553 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5fsff"] Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.268850 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwpk4\" (UniqueName: \"kubernetes.io/projected/8200355b-df22-4b1d-8cd2-578662a58762-kube-api-access-bwpk4\") pod \"nmstate-operator-557fdffb88-5fsff\" (UID: \"8200355b-df22-4b1d-8cd2-578662a58762\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5fsff" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.370428 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwpk4\" (UniqueName: \"kubernetes.io/projected/8200355b-df22-4b1d-8cd2-578662a58762-kube-api-access-bwpk4\") pod \"nmstate-operator-557fdffb88-5fsff\" (UID: \"8200355b-df22-4b1d-8cd2-578662a58762\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5fsff" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.388830 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwpk4\" (UniqueName: \"kubernetes.io/projected/8200355b-df22-4b1d-8cd2-578662a58762-kube-api-access-bwpk4\") pod \"nmstate-operator-557fdffb88-5fsff\" (UID: \"8200355b-df22-4b1d-8cd2-578662a58762\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5fsff" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.506139 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fsff" Nov 26 07:07:40 crc kubenswrapper[4940]: I1126 07:07:40.958065 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5fsff"] Nov 26 07:07:41 crc kubenswrapper[4940]: I1126 07:07:41.619795 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fsff" event={"ID":"8200355b-df22-4b1d-8cd2-578662a58762","Type":"ContainerStarted","Data":"44dd29b861822949bac3e7998a8b900191a0e6a39383d7b680cd9d27e68aab1f"} Nov 26 07:07:42 crc kubenswrapper[4940]: I1126 07:07:42.364630 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:42 crc kubenswrapper[4940]: I1126 07:07:42.364684 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:42 crc kubenswrapper[4940]: I1126 07:07:42.403765 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:42 crc kubenswrapper[4940]: I1126 07:07:42.657705 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:44 crc kubenswrapper[4940]: I1126 07:07:44.636300 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fsff" event={"ID":"8200355b-df22-4b1d-8cd2-578662a58762","Type":"ContainerStarted","Data":"27bcc22545cc5a1bab1e0ad8b20f7a1caf1a25f7671921d70c05d2bf5cabeb14"} Nov 26 07:07:44 crc kubenswrapper[4940]: I1126 07:07:44.650793 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fsff" podStartSLOduration=2.02913144 podStartE2EDuration="4.650772966s" podCreationTimestamp="2025-11-26 07:07:40 +0000 UTC" firstStartedPulling="2025-11-26 07:07:40.972750617 +0000 UTC m=+762.492892236" lastFinishedPulling="2025-11-26 07:07:43.594392143 +0000 UTC m=+765.114533762" observedRunningTime="2025-11-26 07:07:44.649355751 +0000 UTC m=+766.169497380" watchObservedRunningTime="2025-11-26 07:07:44.650772966 +0000 UTC m=+766.170914595" Nov 26 07:07:45 crc kubenswrapper[4940]: I1126 07:07:45.620707 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pqx67"] Nov 26 07:07:45 crc kubenswrapper[4940]: I1126 07:07:45.620949 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pqx67" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerName="registry-server" containerID="cri-o://b75f0dfbbf151ea565006944044f04b97d6ff265af7228336945407e42ae3f00" gracePeriod=2 Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.655936 4940 generic.go:334] "Generic (PLEG): container finished" podID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerID="b75f0dfbbf151ea565006944044f04b97d6ff265af7228336945407e42ae3f00" exitCode=0 Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.655988 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqx67" event={"ID":"3dac3e04-ae30-4b07-9f0d-6915d7d50205","Type":"ContainerDied","Data":"b75f0dfbbf151ea565006944044f04b97d6ff265af7228336945407e42ae3f00"} Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.716536 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.849803 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n86lx\" (UniqueName: \"kubernetes.io/projected/3dac3e04-ae30-4b07-9f0d-6915d7d50205-kube-api-access-n86lx\") pod \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.850376 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-catalog-content\") pod \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.850429 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-utilities\") pod \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\" (UID: \"3dac3e04-ae30-4b07-9f0d-6915d7d50205\") " Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.851214 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-utilities" (OuterVolumeSpecName: "utilities") pod "3dac3e04-ae30-4b07-9f0d-6915d7d50205" (UID: "3dac3e04-ae30-4b07-9f0d-6915d7d50205"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.855590 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dac3e04-ae30-4b07-9f0d-6915d7d50205-kube-api-access-n86lx" (OuterVolumeSpecName: "kube-api-access-n86lx") pod "3dac3e04-ae30-4b07-9f0d-6915d7d50205" (UID: "3dac3e04-ae30-4b07-9f0d-6915d7d50205"). InnerVolumeSpecName "kube-api-access-n86lx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.943959 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3dac3e04-ae30-4b07-9f0d-6915d7d50205" (UID: "3dac3e04-ae30-4b07-9f0d-6915d7d50205"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.951646 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.951697 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n86lx\" (UniqueName: \"kubernetes.io/projected/3dac3e04-ae30-4b07-9f0d-6915d7d50205-kube-api-access-n86lx\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4940]: I1126 07:07:46.951711 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dac3e04-ae30-4b07-9f0d-6915d7d50205-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.226988 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6brc6"] Nov 26 07:07:47 crc kubenswrapper[4940]: E1126 07:07:47.227199 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerName="registry-server" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.227210 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerName="registry-server" Nov 26 07:07:47 crc kubenswrapper[4940]: E1126 07:07:47.227230 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerName="extract-utilities" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.227236 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerName="extract-utilities" Nov 26 07:07:47 crc kubenswrapper[4940]: E1126 07:07:47.227244 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerName="extract-content" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.227250 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerName="extract-content" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.227350 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" containerName="registry-server" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.228056 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.237705 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6brc6"] Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.355874 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-catalog-content\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.356109 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-utilities\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.356419 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw576\" (UniqueName: \"kubernetes.io/projected/02c3ee1a-8401-4c8d-8450-b015407c362a-kube-api-access-gw576\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.457133 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw576\" (UniqueName: \"kubernetes.io/projected/02c3ee1a-8401-4c8d-8450-b015407c362a-kube-api-access-gw576\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.457183 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-catalog-content\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.457220 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-utilities\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.457672 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-catalog-content\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.457710 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-utilities\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.477320 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gw576\" (UniqueName: \"kubernetes.io/projected/02c3ee1a-8401-4c8d-8450-b015407c362a-kube-api-access-gw576\") pod \"community-operators-6brc6\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.543899 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.678625 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqx67" event={"ID":"3dac3e04-ae30-4b07-9f0d-6915d7d50205","Type":"ContainerDied","Data":"5d3472a61a5cdbab7a819224a43070d0ff2bc1f43e17cdc71cec3e5411cbf104"} Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.678675 4940 scope.go:117] "RemoveContainer" containerID="b75f0dfbbf151ea565006944044f04b97d6ff265af7228336945407e42ae3f00" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.678788 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pqx67" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.707250 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pqx67"] Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.712798 4940 scope.go:117] "RemoveContainer" containerID="fddc21a877d6cbb2764b348dd98715b8894158c886f5211ccbd05c3d9a69ff45" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.720443 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pqx67"] Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.735884 4940 scope.go:117] "RemoveContainer" containerID="50610aeab0be96c69af131b9c205c9d2ff0896e7cc087726ec75bf9aa61f227f" Nov 26 07:07:47 crc kubenswrapper[4940]: I1126 07:07:47.830715 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6brc6"] Nov 26 07:07:47 crc kubenswrapper[4940]: W1126 07:07:47.834325 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02c3ee1a_8401_4c8d_8450_b015407c362a.slice/crio-753cb252a38d5b1b9e4d1cdf209fa815b49eed466c15d581204e899ad4124b97 WatchSource:0}: Error finding container 753cb252a38d5b1b9e4d1cdf209fa815b49eed466c15d581204e899ad4124b97: Status 404 returned error can't find the container with id 753cb252a38d5b1b9e4d1cdf209fa815b49eed466c15d581204e899ad4124b97 Nov 26 07:07:48 crc kubenswrapper[4940]: I1126 07:07:48.685134 4940 generic.go:334] "Generic (PLEG): container finished" podID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerID="c3364907b309e2cb89c76c5fd0ccbb73a0d4e4e1093330ea23e1c77e3412b0c9" exitCode=0 Nov 26 07:07:48 crc kubenswrapper[4940]: I1126 07:07:48.685221 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6brc6" event={"ID":"02c3ee1a-8401-4c8d-8450-b015407c362a","Type":"ContainerDied","Data":"c3364907b309e2cb89c76c5fd0ccbb73a0d4e4e1093330ea23e1c77e3412b0c9"} Nov 26 07:07:48 crc kubenswrapper[4940]: I1126 07:07:48.685260 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6brc6" event={"ID":"02c3ee1a-8401-4c8d-8450-b015407c362a","Type":"ContainerStarted","Data":"753cb252a38d5b1b9e4d1cdf209fa815b49eed466c15d581204e899ad4124b97"} Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 
07:07:49.174591 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dac3e04-ae30-4b07-9f0d-6915d7d50205" path="/var/lib/kubelet/pods/3dac3e04-ae30-4b07-9f0d-6915d7d50205/volumes" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.692681 4940 generic.go:334] "Generic (PLEG): container finished" podID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerID="554b513d8b80a9b5c850dc5219d19dfe5275fdfcd6a02a761ab937a448ce4e5f" exitCode=0 Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.692731 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6brc6" event={"ID":"02c3ee1a-8401-4c8d-8450-b015407c362a","Type":"ContainerDied","Data":"554b513d8b80a9b5c850dc5219d19dfe5275fdfcd6a02a761ab937a448ce4e5f"} Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.828346 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj"] Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.829373 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.831239 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-qjlnv" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.838598 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq"] Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.839327 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.844245 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.845868 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj"] Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.853460 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq"] Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.865380 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-w7vjz"] Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.866171 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.951276 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2"] Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.952093 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.954098 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-kdtmq" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.954261 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.954753 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.969434 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2"] Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.998264 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxlfd\" (UniqueName: \"kubernetes.io/projected/4e1ada7b-3473-46da-bae3-457ac931f202-kube-api-access-rxlfd\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.998324 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-ovs-socket\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.998585 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-dbus-socket\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.998624 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-nmstate-lock\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.998694 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkhxr\" (UniqueName: \"kubernetes.io/projected/0acfba00-a78f-4f07-9273-dbd13ea957db-kube-api-access-rkhxr\") pod \"nmstate-webhook-6b89b748d8-ll7sq\" (UID: \"0acfba00-a78f-4f07-9273-dbd13ea957db\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.998762 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-849x4\" (UniqueName: \"kubernetes.io/projected/9dffd2d7-450f-4452-8aac-29ca0dae27b2-kube-api-access-849x4\") pod \"nmstate-metrics-5dcf9c57c5-2djsj\" (UID: \"9dffd2d7-450f-4452-8aac-29ca0dae27b2\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" Nov 26 07:07:49 crc kubenswrapper[4940]: I1126 07:07:49.998794 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0acfba00-a78f-4f07-9273-dbd13ea957db-tls-key-pair\") pod 
\"nmstate-webhook-6b89b748d8-ll7sq\" (UID: \"0acfba00-a78f-4f07-9273-dbd13ea957db\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.099886 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-849x4\" (UniqueName: \"kubernetes.io/projected/9dffd2d7-450f-4452-8aac-29ca0dae27b2-kube-api-access-849x4\") pod \"nmstate-metrics-5dcf9c57c5-2djsj\" (UID: \"9dffd2d7-450f-4452-8aac-29ca0dae27b2\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.099932 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0acfba00-a78f-4f07-9273-dbd13ea957db-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-ll7sq\" (UID: \"0acfba00-a78f-4f07-9273-dbd13ea957db\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.099970 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee144795-d94d-4e4a-8e98-6e9d554513f9-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100010 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ee144795-d94d-4e4a-8e98-6e9d554513f9-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100063 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxlfd\" (UniqueName: \"kubernetes.io/projected/4e1ada7b-3473-46da-bae3-457ac931f202-kube-api-access-rxlfd\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100086 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-ovs-socket\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100113 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-dbus-socket\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100136 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6khd\" (UniqueName: \"kubernetes.io/projected/ee144795-d94d-4e4a-8e98-6e9d554513f9-kube-api-access-h6khd\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100160 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-nmstate-lock\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100194 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkhxr\" (UniqueName: \"kubernetes.io/projected/0acfba00-a78f-4f07-9273-dbd13ea957db-kube-api-access-rkhxr\") pod \"nmstate-webhook-6b89b748d8-ll7sq\" (UID: \"0acfba00-a78f-4f07-9273-dbd13ea957db\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100256 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-nmstate-lock\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100374 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-dbus-socket\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.100450 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/4e1ada7b-3473-46da-bae3-457ac931f202-ovs-socket\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.104697 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0acfba00-a78f-4f07-9273-dbd13ea957db-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-ll7sq\" (UID: \"0acfba00-a78f-4f07-9273-dbd13ea957db\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.118871 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-849x4\" (UniqueName: \"kubernetes.io/projected/9dffd2d7-450f-4452-8aac-29ca0dae27b2-kube-api-access-849x4\") pod \"nmstate-metrics-5dcf9c57c5-2djsj\" (UID: \"9dffd2d7-450f-4452-8aac-29ca0dae27b2\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.128906 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxlfd\" (UniqueName: \"kubernetes.io/projected/4e1ada7b-3473-46da-bae3-457ac931f202-kube-api-access-rxlfd\") pod \"nmstate-handler-w7vjz\" (UID: \"4e1ada7b-3473-46da-bae3-457ac931f202\") " pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.129252 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkhxr\" (UniqueName: \"kubernetes.io/projected/0acfba00-a78f-4f07-9273-dbd13ea957db-kube-api-access-rkhxr\") pod \"nmstate-webhook-6b89b748d8-ll7sq\" (UID: \"0acfba00-a78f-4f07-9273-dbd13ea957db\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.146547 4940 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-console/console-84f7b987b5-xcqxn"] Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.147315 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.162713 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-84f7b987b5-xcqxn"] Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.195348 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.201001 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ee144795-d94d-4e4a-8e98-6e9d554513f9-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.201097 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6khd\" (UniqueName: \"kubernetes.io/projected/ee144795-d94d-4e4a-8e98-6e9d554513f9-kube-api-access-h6khd\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.201184 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee144795-d94d-4e4a-8e98-6e9d554513f9-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: E1126 07:07:50.201273 4940 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 26 07:07:50 crc kubenswrapper[4940]: E1126 07:07:50.201337 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ee144795-d94d-4e4a-8e98-6e9d554513f9-plugin-serving-cert podName:ee144795-d94d-4e4a-8e98-6e9d554513f9 nodeName:}" failed. No retries permitted until 2025-11-26 07:07:50.701317192 +0000 UTC m=+772.221458811 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/ee144795-d94d-4e4a-8e98-6e9d554513f9-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-m99d2" (UID: "ee144795-d94d-4e4a-8e98-6e9d554513f9") : secret "plugin-serving-cert" not found Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.202202 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ee144795-d94d-4e4a-8e98-6e9d554513f9-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.207475 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.215307 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.222778 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6khd\" (UniqueName: \"kubernetes.io/projected/ee144795-d94d-4e4a-8e98-6e9d554513f9-kube-api-access-h6khd\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: W1126 07:07:50.251639 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e1ada7b_3473_46da_bae3_457ac931f202.slice/crio-bc91f05a6a1007bf105b12427bbca8c59566e9b4dbd7241740198cfc2e2bb369 WatchSource:0}: Error finding container bc91f05a6a1007bf105b12427bbca8c59566e9b4dbd7241740198cfc2e2bb369: Status 404 returned error can't find the container with id bc91f05a6a1007bf105b12427bbca8c59566e9b4dbd7241740198cfc2e2bb369 Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.302008 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-oauth-serving-cert\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.302350 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-trusted-ca-bundle\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.302403 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd8zd\" (UniqueName: \"kubernetes.io/projected/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-kube-api-access-rd8zd\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.302457 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-oauth-config\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.302497 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-serving-cert\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.302522 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-service-ca\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc 
kubenswrapper[4940]: I1126 07:07:50.302571 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-config\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.397714 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj"] Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.403621 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-oauth-serving-cert\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.403670 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-trusted-ca-bundle\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.403714 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd8zd\" (UniqueName: \"kubernetes.io/projected/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-kube-api-access-rd8zd\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.403778 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-oauth-config\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.403814 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-serving-cert\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.403841 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-service-ca\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.403879 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-config\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.405155 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-oauth-serving-cert\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.405373 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-config\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.405480 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-trusted-ca-bundle\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.407695 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-service-ca\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.409897 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-serving-cert\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.409933 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-console-oauth-config\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.420650 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rd8zd\" (UniqueName: \"kubernetes.io/projected/67c657a5-7f1e-4b5a-9327-8b2d9cb04905-kube-api-access-rd8zd\") pod \"console-84f7b987b5-xcqxn\" (UID: \"67c657a5-7f1e-4b5a-9327-8b2d9cb04905\") " pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.512032 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.654066 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq"] Nov 26 07:07:50 crc kubenswrapper[4940]: W1126 07:07:50.666112 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0acfba00_a78f_4f07_9273_dbd13ea957db.slice/crio-f914bafe65657d5d72a4c5999a449201752c95460909f312db5d2f53dcd7039b WatchSource:0}: Error finding container f914bafe65657d5d72a4c5999a449201752c95460909f312db5d2f53dcd7039b: Status 404 returned error can't find the container with id f914bafe65657d5d72a4c5999a449201752c95460909f312db5d2f53dcd7039b Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.704966 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6brc6" event={"ID":"02c3ee1a-8401-4c8d-8450-b015407c362a","Type":"ContainerStarted","Data":"d00a04b0a655f79b26aaeaef0c12a332dd52a17ad1dc481afb97e6b3ad348908"} Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.707293 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee144795-d94d-4e4a-8e98-6e9d554513f9-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.708498 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" event={"ID":"9dffd2d7-450f-4452-8aac-29ca0dae27b2","Type":"ContainerStarted","Data":"5350cc82d2522796c7cc5ebea8a48f00e12a8c3f040f31760c2d34458b38e79d"} Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.709214 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" event={"ID":"0acfba00-a78f-4f07-9273-dbd13ea957db","Type":"ContainerStarted","Data":"f914bafe65657d5d72a4c5999a449201752c95460909f312db5d2f53dcd7039b"} Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.710136 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-w7vjz" event={"ID":"4e1ada7b-3473-46da-bae3-457ac931f202","Type":"ContainerStarted","Data":"bc91f05a6a1007bf105b12427bbca8c59566e9b4dbd7241740198cfc2e2bb369"} Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.710981 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ee144795-d94d-4e4a-8e98-6e9d554513f9-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-m99d2\" (UID: \"ee144795-d94d-4e4a-8e98-6e9d554513f9\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.721261 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6brc6" podStartSLOduration=2.278644272 podStartE2EDuration="3.721247202s" podCreationTimestamp="2025-11-26 07:07:47 +0000 UTC" firstStartedPulling="2025-11-26 07:07:48.686935805 +0000 UTC m=+770.207077424" lastFinishedPulling="2025-11-26 07:07:50.129538735 +0000 UTC m=+771.649680354" observedRunningTime="2025-11-26 07:07:50.718622579 +0000 UTC m=+772.238764208" watchObservedRunningTime="2025-11-26 07:07:50.721247202 +0000 UTC 
m=+772.241388821" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.865611 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" Nov 26 07:07:50 crc kubenswrapper[4940]: I1126 07:07:50.902995 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-84f7b987b5-xcqxn"] Nov 26 07:07:50 crc kubenswrapper[4940]: W1126 07:07:50.914946 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67c657a5_7f1e_4b5a_9327_8b2d9cb04905.slice/crio-34cadf84659cd03ccf3d7ad3e8afae6e90beb8c5a05372932d2819ab00228eca WatchSource:0}: Error finding container 34cadf84659cd03ccf3d7ad3e8afae6e90beb8c5a05372932d2819ab00228eca: Status 404 returned error can't find the container with id 34cadf84659cd03ccf3d7ad3e8afae6e90beb8c5a05372932d2819ab00228eca Nov 26 07:07:51 crc kubenswrapper[4940]: I1126 07:07:51.303693 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2"] Nov 26 07:07:51 crc kubenswrapper[4940]: W1126 07:07:51.312481 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee144795_d94d_4e4a_8e98_6e9d554513f9.slice/crio-e83d5873c2f45c6b5d9267ffa4c3cd012bb93f8fbf64ace5a9e69f8bdb055bc1 WatchSource:0}: Error finding container e83d5873c2f45c6b5d9267ffa4c3cd012bb93f8fbf64ace5a9e69f8bdb055bc1: Status 404 returned error can't find the container with id e83d5873c2f45c6b5d9267ffa4c3cd012bb93f8fbf64ace5a9e69f8bdb055bc1 Nov 26 07:07:51 crc kubenswrapper[4940]: I1126 07:07:51.726317 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84f7b987b5-xcqxn" event={"ID":"67c657a5-7f1e-4b5a-9327-8b2d9cb04905","Type":"ContainerStarted","Data":"97017601d91e460b42b0df907cb07962b4314932f87dbc34ebc98e5a5611f127"} Nov 26 07:07:51 crc kubenswrapper[4940]: I1126 07:07:51.726376 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84f7b987b5-xcqxn" event={"ID":"67c657a5-7f1e-4b5a-9327-8b2d9cb04905","Type":"ContainerStarted","Data":"34cadf84659cd03ccf3d7ad3e8afae6e90beb8c5a05372932d2819ab00228eca"} Nov 26 07:07:51 crc kubenswrapper[4940]: I1126 07:07:51.727867 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:07:51 crc kubenswrapper[4940]: I1126 07:07:51.727909 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:07:51 crc kubenswrapper[4940]: I1126 07:07:51.728147 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" event={"ID":"ee144795-d94d-4e4a-8e98-6e9d554513f9","Type":"ContainerStarted","Data":"e83d5873c2f45c6b5d9267ffa4c3cd012bb93f8fbf64ace5a9e69f8bdb055bc1"} Nov 26 07:07:51 crc kubenswrapper[4940]: I1126 07:07:51.747526 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-84f7b987b5-xcqxn" 
podStartSLOduration=1.747506234 podStartE2EDuration="1.747506234s" podCreationTimestamp="2025-11-26 07:07:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:51.742845027 +0000 UTC m=+773.262986666" watchObservedRunningTime="2025-11-26 07:07:51.747506234 +0000 UTC m=+773.267647863" Nov 26 07:07:53 crc kubenswrapper[4940]: I1126 07:07:53.740070 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" event={"ID":"9dffd2d7-450f-4452-8aac-29ca0dae27b2","Type":"ContainerStarted","Data":"2357ecd1b7acca20c867b2f7e7da0b879907233d94b9b7c26431e6577fffa47d"} Nov 26 07:07:53 crc kubenswrapper[4940]: I1126 07:07:53.741283 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" event={"ID":"0acfba00-a78f-4f07-9273-dbd13ea957db","Type":"ContainerStarted","Data":"fccc79f3f8351e1d832c941a0cf6a2a59f9fc5fd8c524675075d8583330e18c7"} Nov 26 07:07:53 crc kubenswrapper[4940]: I1126 07:07:53.741417 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:07:53 crc kubenswrapper[4940]: I1126 07:07:53.742339 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-w7vjz" event={"ID":"4e1ada7b-3473-46da-bae3-457ac931f202","Type":"ContainerStarted","Data":"2eb4884bb92a9c5ae5e578ba650c2fefe341c726433ba8577ab4cfc274d02726"} Nov 26 07:07:53 crc kubenswrapper[4940]: I1126 07:07:53.742510 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:07:53 crc kubenswrapper[4940]: I1126 07:07:53.765193 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" podStartSLOduration=2.604610761 podStartE2EDuration="4.765174475s" podCreationTimestamp="2025-11-26 07:07:49 +0000 UTC" firstStartedPulling="2025-11-26 07:07:50.672059039 +0000 UTC m=+772.192200658" lastFinishedPulling="2025-11-26 07:07:52.832622753 +0000 UTC m=+774.352764372" observedRunningTime="2025-11-26 07:07:53.75711163 +0000 UTC m=+775.277253269" watchObservedRunningTime="2025-11-26 07:07:53.765174475 +0000 UTC m=+775.285316104" Nov 26 07:07:53 crc kubenswrapper[4940]: I1126 07:07:53.774720 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-w7vjz" podStartSLOduration=2.214981615 podStartE2EDuration="4.774702876s" podCreationTimestamp="2025-11-26 07:07:49 +0000 UTC" firstStartedPulling="2025-11-26 07:07:50.25446691 +0000 UTC m=+771.774608529" lastFinishedPulling="2025-11-26 07:07:52.814188161 +0000 UTC m=+774.334329790" observedRunningTime="2025-11-26 07:07:53.773457076 +0000 UTC m=+775.293598716" watchObservedRunningTime="2025-11-26 07:07:53.774702876 +0000 UTC m=+775.294844495" Nov 26 07:07:54 crc kubenswrapper[4940]: I1126 07:07:54.756414 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" event={"ID":"ee144795-d94d-4e4a-8e98-6e9d554513f9","Type":"ContainerStarted","Data":"bdf046bd59754aafef1fbff1af55a8b9f72209a5d63ed2b8e267a87ef5b025a2"} Nov 26 07:07:54 crc kubenswrapper[4940]: I1126 07:07:54.769154 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-m99d2" podStartSLOduration=3.352741888 
podStartE2EDuration="5.769132692s" podCreationTimestamp="2025-11-26 07:07:49 +0000 UTC" firstStartedPulling="2025-11-26 07:07:51.315123598 +0000 UTC m=+772.835265217" lastFinishedPulling="2025-11-26 07:07:53.731514402 +0000 UTC m=+775.251656021" observedRunningTime="2025-11-26 07:07:54.768393719 +0000 UTC m=+776.288535338" watchObservedRunningTime="2025-11-26 07:07:54.769132692 +0000 UTC m=+776.289274311" Nov 26 07:07:55 crc kubenswrapper[4940]: I1126 07:07:55.774410 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" event={"ID":"9dffd2d7-450f-4452-8aac-29ca0dae27b2","Type":"ContainerStarted","Data":"6fee1127f1830668f3ec7bd3a01c6d307a13c9d932d9674bf072f149e7380e93"} Nov 26 07:07:55 crc kubenswrapper[4940]: I1126 07:07:55.799276 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-2djsj" podStartSLOduration=1.852457418 podStartE2EDuration="6.799253626s" podCreationTimestamp="2025-11-26 07:07:49 +0000 UTC" firstStartedPulling="2025-11-26 07:07:50.412619736 +0000 UTC m=+771.932761355" lastFinishedPulling="2025-11-26 07:07:55.359415944 +0000 UTC m=+776.879557563" observedRunningTime="2025-11-26 07:07:55.79431754 +0000 UTC m=+777.314459189" watchObservedRunningTime="2025-11-26 07:07:55.799253626 +0000 UTC m=+777.319395235" Nov 26 07:07:57 crc kubenswrapper[4940]: I1126 07:07:57.544315 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:57 crc kubenswrapper[4940]: I1126 07:07:57.544682 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:57 crc kubenswrapper[4940]: I1126 07:07:57.601966 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:57 crc kubenswrapper[4940]: I1126 07:07:57.843428 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:07:57 crc kubenswrapper[4940]: I1126 07:07:57.900956 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6brc6"] Nov 26 07:07:59 crc kubenswrapper[4940]: I1126 07:07:59.796960 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6brc6" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerName="registry-server" containerID="cri-o://d00a04b0a655f79b26aaeaef0c12a332dd52a17ad1dc481afb97e6b3ad348908" gracePeriod=2 Nov 26 07:08:00 crc kubenswrapper[4940]: I1126 07:08:00.233708 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-w7vjz" Nov 26 07:08:00 crc kubenswrapper[4940]: I1126 07:08:00.512650 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:08:00 crc kubenswrapper[4940]: I1126 07:08:00.512992 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:08:00 crc kubenswrapper[4940]: I1126 07:08:00.520627 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:08:00 crc kubenswrapper[4940]: I1126 07:08:00.804535 4940 generic.go:334] "Generic (PLEG): container finished" 
podID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerID="d00a04b0a655f79b26aaeaef0c12a332dd52a17ad1dc481afb97e6b3ad348908" exitCode=0 Nov 26 07:08:00 crc kubenswrapper[4940]: I1126 07:08:00.804708 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6brc6" event={"ID":"02c3ee1a-8401-4c8d-8450-b015407c362a","Type":"ContainerDied","Data":"d00a04b0a655f79b26aaeaef0c12a332dd52a17ad1dc481afb97e6b3ad348908"} Nov 26 07:08:00 crc kubenswrapper[4940]: I1126 07:08:00.811317 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-84f7b987b5-xcqxn" Nov 26 07:08:00 crc kubenswrapper[4940]: I1126 07:08:00.857546 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-bflcx"] Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.802129 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.810123 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6brc6" event={"ID":"02c3ee1a-8401-4c8d-8450-b015407c362a","Type":"ContainerDied","Data":"753cb252a38d5b1b9e4d1cdf209fa815b49eed466c15d581204e899ad4124b97"} Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.810147 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6brc6" Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.810569 4940 scope.go:117] "RemoveContainer" containerID="d00a04b0a655f79b26aaeaef0c12a332dd52a17ad1dc481afb97e6b3ad348908" Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.828081 4940 scope.go:117] "RemoveContainer" containerID="554b513d8b80a9b5c850dc5219d19dfe5275fdfcd6a02a761ab937a448ce4e5f" Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.846892 4940 scope.go:117] "RemoveContainer" containerID="c3364907b309e2cb89c76c5fd0ccbb73a0d4e4e1093330ea23e1c77e3412b0c9" Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.906444 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-utilities\") pod \"02c3ee1a-8401-4c8d-8450-b015407c362a\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.906516 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw576\" (UniqueName: \"kubernetes.io/projected/02c3ee1a-8401-4c8d-8450-b015407c362a-kube-api-access-gw576\") pod \"02c3ee1a-8401-4c8d-8450-b015407c362a\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.906700 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-catalog-content\") pod \"02c3ee1a-8401-4c8d-8450-b015407c362a\" (UID: \"02c3ee1a-8401-4c8d-8450-b015407c362a\") " Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.907703 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-utilities" (OuterVolumeSpecName: "utilities") pod "02c3ee1a-8401-4c8d-8450-b015407c362a" (UID: "02c3ee1a-8401-4c8d-8450-b015407c362a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.911968 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02c3ee1a-8401-4c8d-8450-b015407c362a-kube-api-access-gw576" (OuterVolumeSpecName: "kube-api-access-gw576") pod "02c3ee1a-8401-4c8d-8450-b015407c362a" (UID: "02c3ee1a-8401-4c8d-8450-b015407c362a"). InnerVolumeSpecName "kube-api-access-gw576". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:01 crc kubenswrapper[4940]: I1126 07:08:01.961067 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "02c3ee1a-8401-4c8d-8450-b015407c362a" (UID: "02c3ee1a-8401-4c8d-8450-b015407c362a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:02 crc kubenswrapper[4940]: I1126 07:08:02.010640 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:02 crc kubenswrapper[4940]: I1126 07:08:02.010700 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw576\" (UniqueName: \"kubernetes.io/projected/02c3ee1a-8401-4c8d-8450-b015407c362a-kube-api-access-gw576\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:02 crc kubenswrapper[4940]: I1126 07:08:02.010722 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02c3ee1a-8401-4c8d-8450-b015407c362a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:02 crc kubenswrapper[4940]: I1126 07:08:02.143563 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6brc6"] Nov 26 07:08:02 crc kubenswrapper[4940]: I1126 07:08:02.151357 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6brc6"] Nov 26 07:08:03 crc kubenswrapper[4940]: I1126 07:08:03.172820 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" path="/var/lib/kubelet/pods/02c3ee1a-8401-4c8d-8450-b015407c362a/volumes" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.048892 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-f2569"] Nov 26 07:08:04 crc kubenswrapper[4940]: E1126 07:08:04.049274 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerName="registry-server" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.049301 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerName="registry-server" Nov 26 07:08:04 crc kubenswrapper[4940]: E1126 07:08:04.049325 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerName="extract-content" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.049338 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerName="extract-content" Nov 26 07:08:04 crc kubenswrapper[4940]: E1126 07:08:04.049361 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerName="extract-utilities" Nov 26 
07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.049374 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerName="extract-utilities" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.049541 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="02c3ee1a-8401-4c8d-8450-b015407c362a" containerName="registry-server" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.050858 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.058447 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-f2569"] Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.138464 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlnsl\" (UniqueName: \"kubernetes.io/projected/95569f59-f387-4c98-b91c-30fcf4ec3c3f-kube-api-access-mlnsl\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.138510 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-catalog-content\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.138595 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-utilities\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.239354 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlnsl\" (UniqueName: \"kubernetes.io/projected/95569f59-f387-4c98-b91c-30fcf4ec3c3f-kube-api-access-mlnsl\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.239395 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-catalog-content\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.239455 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-utilities\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.239899 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-utilities\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " 
pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.240464 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-catalog-content\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.262264 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlnsl\" (UniqueName: \"kubernetes.io/projected/95569f59-f387-4c98-b91c-30fcf4ec3c3f-kube-api-access-mlnsl\") pod \"redhat-marketplace-f2569\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.378946 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.779763 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-f2569"] Nov 26 07:08:04 crc kubenswrapper[4940]: W1126 07:08:04.783155 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95569f59_f387_4c98_b91c_30fcf4ec3c3f.slice/crio-22314ca63c5c5c0e5e3abbb677b06307351cb2c3e9c6b3f22801b97a461c713f WatchSource:0}: Error finding container 22314ca63c5c5c0e5e3abbb677b06307351cb2c3e9c6b3f22801b97a461c713f: Status 404 returned error can't find the container with id 22314ca63c5c5c0e5e3abbb677b06307351cb2c3e9c6b3f22801b97a461c713f Nov 26 07:08:04 crc kubenswrapper[4940]: I1126 07:08:04.832459 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f2569" event={"ID":"95569f59-f387-4c98-b91c-30fcf4ec3c3f","Type":"ContainerStarted","Data":"22314ca63c5c5c0e5e3abbb677b06307351cb2c3e9c6b3f22801b97a461c713f"} Nov 26 07:08:05 crc kubenswrapper[4940]: I1126 07:08:05.839922 4940 generic.go:334] "Generic (PLEG): container finished" podID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerID="e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe" exitCode=0 Nov 26 07:08:05 crc kubenswrapper[4940]: I1126 07:08:05.839965 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f2569" event={"ID":"95569f59-f387-4c98-b91c-30fcf4ec3c3f","Type":"ContainerDied","Data":"e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe"} Nov 26 07:08:06 crc kubenswrapper[4940]: I1126 07:08:06.848206 4940 generic.go:334] "Generic (PLEG): container finished" podID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerID="118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837" exitCode=0 Nov 26 07:08:06 crc kubenswrapper[4940]: I1126 07:08:06.848325 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f2569" event={"ID":"95569f59-f387-4c98-b91c-30fcf4ec3c3f","Type":"ContainerDied","Data":"118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837"} Nov 26 07:08:07 crc kubenswrapper[4940]: I1126 07:08:07.857697 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f2569" event={"ID":"95569f59-f387-4c98-b91c-30fcf4ec3c3f","Type":"ContainerStarted","Data":"804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2"} Nov 26 
07:08:07 crc kubenswrapper[4940]: I1126 07:08:07.876662 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-f2569" podStartSLOduration=2.468541028 podStartE2EDuration="3.87664408s" podCreationTimestamp="2025-11-26 07:08:04 +0000 UTC" firstStartedPulling="2025-11-26 07:08:05.84252725 +0000 UTC m=+787.362668879" lastFinishedPulling="2025-11-26 07:08:07.250630292 +0000 UTC m=+788.770771931" observedRunningTime="2025-11-26 07:08:07.873377288 +0000 UTC m=+789.393518947" watchObservedRunningTime="2025-11-26 07:08:07.87664408 +0000 UTC m=+789.396785709" Nov 26 07:08:10 crc kubenswrapper[4940]: I1126 07:08:10.214989 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ll7sq" Nov 26 07:08:14 crc kubenswrapper[4940]: I1126 07:08:14.379893 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:14 crc kubenswrapper[4940]: I1126 07:08:14.380138 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:14 crc kubenswrapper[4940]: I1126 07:08:14.449862 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:14 crc kubenswrapper[4940]: I1126 07:08:14.954961 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:14 crc kubenswrapper[4940]: I1126 07:08:14.998601 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-f2569"] Nov 26 07:08:16 crc kubenswrapper[4940]: I1126 07:08:16.911271 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-f2569" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerName="registry-server" containerID="cri-o://804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2" gracePeriod=2 Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.383004 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.519528 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-utilities\") pod \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.519676 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-catalog-content\") pod \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.519717 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlnsl\" (UniqueName: \"kubernetes.io/projected/95569f59-f387-4c98-b91c-30fcf4ec3c3f-kube-api-access-mlnsl\") pod \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\" (UID: \"95569f59-f387-4c98-b91c-30fcf4ec3c3f\") " Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.520882 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-utilities" (OuterVolumeSpecName: "utilities") pod "95569f59-f387-4c98-b91c-30fcf4ec3c3f" (UID: "95569f59-f387-4c98-b91c-30fcf4ec3c3f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.534660 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95569f59-f387-4c98-b91c-30fcf4ec3c3f-kube-api-access-mlnsl" (OuterVolumeSpecName: "kube-api-access-mlnsl") pod "95569f59-f387-4c98-b91c-30fcf4ec3c3f" (UID: "95569f59-f387-4c98-b91c-30fcf4ec3c3f"). InnerVolumeSpecName "kube-api-access-mlnsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.540476 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "95569f59-f387-4c98-b91c-30fcf4ec3c3f" (UID: "95569f59-f387-4c98-b91c-30fcf4ec3c3f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.621109 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.621312 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlnsl\" (UniqueName: \"kubernetes.io/projected/95569f59-f387-4c98-b91c-30fcf4ec3c3f-kube-api-access-mlnsl\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.621643 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95569f59-f387-4c98-b91c-30fcf4ec3c3f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.919514 4940 generic.go:334] "Generic (PLEG): container finished" podID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerID="804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2" exitCode=0 Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.919564 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f2569" event={"ID":"95569f59-f387-4c98-b91c-30fcf4ec3c3f","Type":"ContainerDied","Data":"804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2"} Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.919593 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-f2569" event={"ID":"95569f59-f387-4c98-b91c-30fcf4ec3c3f","Type":"ContainerDied","Data":"22314ca63c5c5c0e5e3abbb677b06307351cb2c3e9c6b3f22801b97a461c713f"} Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.919617 4940 scope.go:117] "RemoveContainer" containerID="804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.919735 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-f2569" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.941778 4940 scope.go:117] "RemoveContainer" containerID="118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837" Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.967325 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-f2569"] Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.971306 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-f2569"] Nov 26 07:08:17 crc kubenswrapper[4940]: I1126 07:08:17.982119 4940 scope.go:117] "RemoveContainer" containerID="e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe" Nov 26 07:08:18 crc kubenswrapper[4940]: I1126 07:08:18.002400 4940 scope.go:117] "RemoveContainer" containerID="804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2" Nov 26 07:08:18 crc kubenswrapper[4940]: E1126 07:08:18.003229 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2\": container with ID starting with 804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2 not found: ID does not exist" containerID="804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2" Nov 26 07:08:18 crc kubenswrapper[4940]: I1126 07:08:18.003297 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2"} err="failed to get container status \"804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2\": rpc error: code = NotFound desc = could not find container \"804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2\": container with ID starting with 804c7a4017940385ebd1722a0131aa8ca2fdebae0e662c8182bf5defd6013ee2 not found: ID does not exist" Nov 26 07:08:18 crc kubenswrapper[4940]: I1126 07:08:18.003371 4940 scope.go:117] "RemoveContainer" containerID="118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837" Nov 26 07:08:18 crc kubenswrapper[4940]: E1126 07:08:18.003683 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837\": container with ID starting with 118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837 not found: ID does not exist" containerID="118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837" Nov 26 07:08:18 crc kubenswrapper[4940]: I1126 07:08:18.003726 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837"} err="failed to get container status \"118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837\": rpc error: code = NotFound desc = could not find container \"118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837\": container with ID starting with 118fde1c0165c5ac7c37f9a6cf6308990a9d7ed12e6ebc410aa6472c6c256837 not found: ID does not exist" Nov 26 07:08:18 crc kubenswrapper[4940]: I1126 07:08:18.003752 4940 scope.go:117] "RemoveContainer" containerID="e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe" Nov 26 07:08:18 crc kubenswrapper[4940]: E1126 07:08:18.003994 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe\": container with ID starting with e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe not found: ID does not exist" containerID="e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe" Nov 26 07:08:18 crc kubenswrapper[4940]: I1126 07:08:18.004020 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe"} err="failed to get container status \"e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe\": rpc error: code = NotFound desc = could not find container \"e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe\": container with ID starting with e81c2c65101753e369544d46f050844da2d7f75ab5315ce38a478dfe878f0ebe not found: ID does not exist" Nov 26 07:08:19 crc kubenswrapper[4940]: I1126 07:08:19.173096 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" path="/var/lib/kubelet/pods/95569f59-f387-4c98-b91c-30fcf4ec3c3f/volumes" Nov 26 07:08:21 crc kubenswrapper[4940]: I1126 07:08:21.728326 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:08:21 crc kubenswrapper[4940]: I1126 07:08:21.728983 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:08:21 crc kubenswrapper[4940]: I1126 07:08:21.729058 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:08:21 crc kubenswrapper[4940]: I1126 07:08:21.729677 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4831b08e520666c63a46d4332bc996f659d9f6b347ced8e4ef4908839bbfa56c"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:08:21 crc kubenswrapper[4940]: I1126 07:08:21.729760 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://4831b08e520666c63a46d4332bc996f659d9f6b347ced8e4ef4908839bbfa56c" gracePeriod=600 Nov 26 07:08:21 crc kubenswrapper[4940]: I1126 07:08:21.954336 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="4831b08e520666c63a46d4332bc996f659d9f6b347ced8e4ef4908839bbfa56c" exitCode=0 Nov 26 07:08:21 crc kubenswrapper[4940]: I1126 07:08:21.954424 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"4831b08e520666c63a46d4332bc996f659d9f6b347ced8e4ef4908839bbfa56c"} Nov 26 07:08:21 crc kubenswrapper[4940]: I1126 07:08:21.954883 4940 scope.go:117] "RemoveContainer" containerID="3eee2744c5ef6eeac0756de5055d0728c038b9c6865c21fd3c9a2ecfca6cd031" Nov 26 07:08:22 crc kubenswrapper[4940]: I1126 07:08:22.961871 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"a590ce9820b409bfd278396384f5037a9a72b69b72e14ce99d91eca689514af5"} Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.068521 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn"] Nov 26 07:08:24 crc kubenswrapper[4940]: E1126 07:08:24.069012 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerName="extract-utilities" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.069024 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerName="extract-utilities" Nov 26 07:08:24 crc kubenswrapper[4940]: E1126 07:08:24.069072 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerName="extract-content" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.069079 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerName="extract-content" Nov 26 07:08:24 crc kubenswrapper[4940]: E1126 07:08:24.069089 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerName="registry-server" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.069095 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerName="registry-server" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.069188 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="95569f59-f387-4c98-b91c-30fcf4ec3c3f" containerName="registry-server" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.069890 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.072427 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.083224 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn"] Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.203985 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.204108 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj52s\" (UniqueName: \"kubernetes.io/projected/a177e713-0abb-486a-bd7f-cc80ea4a762e-kube-api-access-vj52s\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.204164 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.305175 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.305517 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.305651 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj52s\" (UniqueName: \"kubernetes.io/projected/a177e713-0abb-486a-bd7f-cc80ea4a762e-kube-api-access-vj52s\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.305686 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.306088 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.326385 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj52s\" (UniqueName: \"kubernetes.io/projected/a177e713-0abb-486a-bd7f-cc80ea4a762e-kube-api-access-vj52s\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.384860 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.803566 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn"] Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.973174 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" event={"ID":"a177e713-0abb-486a-bd7f-cc80ea4a762e","Type":"ContainerStarted","Data":"593f04c5819e28455b8b81a1be25620348654642dab84da542f9905e9cd25b52"} Nov 26 07:08:24 crc kubenswrapper[4940]: I1126 07:08:24.973255 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" event={"ID":"a177e713-0abb-486a-bd7f-cc80ea4a762e","Type":"ContainerStarted","Data":"629ecf0a6c806e315201b2a165a85649dc554998266ecac57f4797246eb9883c"} Nov 26 07:08:25 crc kubenswrapper[4940]: I1126 07:08:25.917269 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-bflcx" podUID="831a57b7-e553-4c53-a658-b10d4183d514" containerName="console" containerID="cri-o://1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb" gracePeriod=15 Nov 26 07:08:25 crc kubenswrapper[4940]: I1126 07:08:25.980800 4940 generic.go:334] "Generic (PLEG): container finished" podID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerID="593f04c5819e28455b8b81a1be25620348654642dab84da542f9905e9cd25b52" exitCode=0 Nov 26 07:08:25 crc kubenswrapper[4940]: I1126 07:08:25.980874 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" event={"ID":"a177e713-0abb-486a-bd7f-cc80ea4a762e","Type":"ContainerDied","Data":"593f04c5819e28455b8b81a1be25620348654642dab84da542f9905e9cd25b52"} Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.307778 4940 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-console_console-f9d7485db-bflcx_831a57b7-e553-4c53-a658-b10d4183d514/console/0.log" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.308096 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-bflcx" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.436559 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-oauth-serving-cert\") pod \"831a57b7-e553-4c53-a658-b10d4183d514\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.436692 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k4rs\" (UniqueName: \"kubernetes.io/projected/831a57b7-e553-4c53-a658-b10d4183d514-kube-api-access-5k4rs\") pod \"831a57b7-e553-4c53-a658-b10d4183d514\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.436732 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-oauth-config\") pod \"831a57b7-e553-4c53-a658-b10d4183d514\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.436834 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-serving-cert\") pod \"831a57b7-e553-4c53-a658-b10d4183d514\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.436894 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-trusted-ca-bundle\") pod \"831a57b7-e553-4c53-a658-b10d4183d514\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.436940 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-service-ca\") pod \"831a57b7-e553-4c53-a658-b10d4183d514\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.436971 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-console-config\") pod \"831a57b7-e553-4c53-a658-b10d4183d514\" (UID: \"831a57b7-e553-4c53-a658-b10d4183d514\") " Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.437725 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "831a57b7-e553-4c53-a658-b10d4183d514" (UID: "831a57b7-e553-4c53-a658-b10d4183d514"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.438100 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-service-ca" (OuterVolumeSpecName: "service-ca") pod "831a57b7-e553-4c53-a658-b10d4183d514" (UID: "831a57b7-e553-4c53-a658-b10d4183d514"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.438212 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-console-config" (OuterVolumeSpecName: "console-config") pod "831a57b7-e553-4c53-a658-b10d4183d514" (UID: "831a57b7-e553-4c53-a658-b10d4183d514"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.438404 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "831a57b7-e553-4c53-a658-b10d4183d514" (UID: "831a57b7-e553-4c53-a658-b10d4183d514"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.442726 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/831a57b7-e553-4c53-a658-b10d4183d514-kube-api-access-5k4rs" (OuterVolumeSpecName: "kube-api-access-5k4rs") pod "831a57b7-e553-4c53-a658-b10d4183d514" (UID: "831a57b7-e553-4c53-a658-b10d4183d514"). InnerVolumeSpecName "kube-api-access-5k4rs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.442766 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "831a57b7-e553-4c53-a658-b10d4183d514" (UID: "831a57b7-e553-4c53-a658-b10d4183d514"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.442979 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "831a57b7-e553-4c53-a658-b10d4183d514" (UID: "831a57b7-e553-4c53-a658-b10d4183d514"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.538636 4940 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.538930 4940 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.539020 4940 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.539144 4940 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.539251 4940 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/831a57b7-e553-4c53-a658-b10d4183d514-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.539324 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5k4rs\" (UniqueName: \"kubernetes.io/projected/831a57b7-e553-4c53-a658-b10d4183d514-kube-api-access-5k4rs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.539397 4940 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/831a57b7-e553-4c53-a658-b10d4183d514-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.987809 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-bflcx_831a57b7-e553-4c53-a658-b10d4183d514/console/0.log" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.987899 4940 generic.go:334] "Generic (PLEG): container finished" podID="831a57b7-e553-4c53-a658-b10d4183d514" containerID="1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb" exitCode=2 Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.987932 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-bflcx" event={"ID":"831a57b7-e553-4c53-a658-b10d4183d514","Type":"ContainerDied","Data":"1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb"} Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.987962 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-bflcx" event={"ID":"831a57b7-e553-4c53-a658-b10d4183d514","Type":"ContainerDied","Data":"62261339630dfe60be92d070f395cafa7aea4039d259e062cc971e14e5b8ff8d"} Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.987983 4940 scope.go:117] "RemoveContainer" containerID="1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb" Nov 26 07:08:26 crc kubenswrapper[4940]: I1126 07:08:26.988051 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-bflcx" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.003851 4940 scope.go:117] "RemoveContainer" containerID="1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb" Nov 26 07:08:27 crc kubenswrapper[4940]: E1126 07:08:27.004397 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb\": container with ID starting with 1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb not found: ID does not exist" containerID="1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.004460 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb"} err="failed to get container status \"1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb\": rpc error: code = NotFound desc = could not find container \"1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb\": container with ID starting with 1216b18f5454312ed6087c86d62d350842343b5b04f59d5017c80f295cd2bacb not found: ID does not exist" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.041245 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-bflcx"] Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.049939 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-bflcx"] Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.171437 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="831a57b7-e553-4c53-a658-b10d4183d514" path="/var/lib/kubelet/pods/831a57b7-e553-4c53-a658-b10d4183d514/volumes" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.635170 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qdl7x"] Nov 26 07:08:27 crc kubenswrapper[4940]: E1126 07:08:27.635432 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="831a57b7-e553-4c53-a658-b10d4183d514" containerName="console" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.635455 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="831a57b7-e553-4c53-a658-b10d4183d514" containerName="console" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.635649 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="831a57b7-e553-4c53-a658-b10d4183d514" containerName="console" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.636847 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.649417 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qdl7x"] Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.757248 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-utilities\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.757331 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-catalog-content\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.757351 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wlk6\" (UniqueName: \"kubernetes.io/projected/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-kube-api-access-2wlk6\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.858117 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-catalog-content\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.858170 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wlk6\" (UniqueName: \"kubernetes.io/projected/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-kube-api-access-2wlk6\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.858234 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-utilities\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.858850 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-catalog-content\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.858944 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-utilities\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.875594 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2wlk6\" (UniqueName: \"kubernetes.io/projected/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-kube-api-access-2wlk6\") pod \"certified-operators-qdl7x\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:27 crc kubenswrapper[4940]: I1126 07:08:27.955325 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:28 crc kubenswrapper[4940]: I1126 07:08:28.006131 4940 generic.go:334] "Generic (PLEG): container finished" podID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerID="07bf6b868be9d1f235f75558eea15dc49342ee56d547b8ced41951c2d6559c6d" exitCode=0 Nov 26 07:08:28 crc kubenswrapper[4940]: I1126 07:08:28.006183 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" event={"ID":"a177e713-0abb-486a-bd7f-cc80ea4a762e","Type":"ContainerDied","Data":"07bf6b868be9d1f235f75558eea15dc49342ee56d547b8ced41951c2d6559c6d"} Nov 26 07:08:28 crc kubenswrapper[4940]: I1126 07:08:28.209174 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qdl7x"] Nov 26 07:08:29 crc kubenswrapper[4940]: I1126 07:08:29.019105 4940 generic.go:334] "Generic (PLEG): container finished" podID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerID="be1bc187ce3923b21a9e73543023b18d0b3aed1f489e3172305b4502727dd2e7" exitCode=0 Nov 26 07:08:29 crc kubenswrapper[4940]: I1126 07:08:29.019170 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" event={"ID":"a177e713-0abb-486a-bd7f-cc80ea4a762e","Type":"ContainerDied","Data":"be1bc187ce3923b21a9e73543023b18d0b3aed1f489e3172305b4502727dd2e7"} Nov 26 07:08:29 crc kubenswrapper[4940]: I1126 07:08:29.022245 4940 generic.go:334] "Generic (PLEG): container finished" podID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerID="39f20e12623f34e5a38a951f194c0380a0d7e303ac86ea550b004140e544c2d6" exitCode=0 Nov 26 07:08:29 crc kubenswrapper[4940]: I1126 07:08:29.022312 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdl7x" event={"ID":"0a96e1e2-91d3-413c-92c3-7c83e9f911ca","Type":"ContainerDied","Data":"39f20e12623f34e5a38a951f194c0380a0d7e303ac86ea550b004140e544c2d6"} Nov 26 07:08:29 crc kubenswrapper[4940]: I1126 07:08:29.022351 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdl7x" event={"ID":"0a96e1e2-91d3-413c-92c3-7c83e9f911ca","Type":"ContainerStarted","Data":"5ba96d1a66177c6123ab5c973b1d71f5ba02101ee2edfb2192bb595348de9e8e"} Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.031924 4940 generic.go:334] "Generic (PLEG): container finished" podID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerID="d0c3d1526906f22da54e3043cb5cb368dc221eed2556cfe88806de4635f30cca" exitCode=0 Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.032020 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdl7x" event={"ID":"0a96e1e2-91d3-413c-92c3-7c83e9f911ca","Type":"ContainerDied","Data":"d0c3d1526906f22da54e3043cb5cb368dc221eed2556cfe88806de4635f30cca"} Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.286441 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.403611 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-util\") pod \"a177e713-0abb-486a-bd7f-cc80ea4a762e\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.403683 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vj52s\" (UniqueName: \"kubernetes.io/projected/a177e713-0abb-486a-bd7f-cc80ea4a762e-kube-api-access-vj52s\") pod \"a177e713-0abb-486a-bd7f-cc80ea4a762e\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.403702 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-bundle\") pod \"a177e713-0abb-486a-bd7f-cc80ea4a762e\" (UID: \"a177e713-0abb-486a-bd7f-cc80ea4a762e\") " Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.405172 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-bundle" (OuterVolumeSpecName: "bundle") pod "a177e713-0abb-486a-bd7f-cc80ea4a762e" (UID: "a177e713-0abb-486a-bd7f-cc80ea4a762e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.411800 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a177e713-0abb-486a-bd7f-cc80ea4a762e-kube-api-access-vj52s" (OuterVolumeSpecName: "kube-api-access-vj52s") pod "a177e713-0abb-486a-bd7f-cc80ea4a762e" (UID: "a177e713-0abb-486a-bd7f-cc80ea4a762e"). InnerVolumeSpecName "kube-api-access-vj52s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.423548 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-util" (OuterVolumeSpecName: "util") pod "a177e713-0abb-486a-bd7f-cc80ea4a762e" (UID: "a177e713-0abb-486a-bd7f-cc80ea4a762e"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.506262 4940 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-util\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.506618 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vj52s\" (UniqueName: \"kubernetes.io/projected/a177e713-0abb-486a-bd7f-cc80ea4a762e-kube-api-access-vj52s\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:30 crc kubenswrapper[4940]: I1126 07:08:30.506632 4940 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a177e713-0abb-486a-bd7f-cc80ea4a762e-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:31 crc kubenswrapper[4940]: I1126 07:08:31.039521 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdl7x" event={"ID":"0a96e1e2-91d3-413c-92c3-7c83e9f911ca","Type":"ContainerStarted","Data":"7c5e1c1010480521c09b134a4b00229e5699ce155a6e6e35f193d2e3803ca562"} Nov 26 07:08:31 crc kubenswrapper[4940]: I1126 07:08:31.042765 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" event={"ID":"a177e713-0abb-486a-bd7f-cc80ea4a762e","Type":"ContainerDied","Data":"629ecf0a6c806e315201b2a165a85649dc554998266ecac57f4797246eb9883c"} Nov 26 07:08:31 crc kubenswrapper[4940]: I1126 07:08:31.042793 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="629ecf0a6c806e315201b2a165a85649dc554998266ecac57f4797246eb9883c" Nov 26 07:08:31 crc kubenswrapper[4940]: I1126 07:08:31.042840 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn" Nov 26 07:08:31 crc kubenswrapper[4940]: I1126 07:08:31.065249 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qdl7x" podStartSLOduration=2.648925191 podStartE2EDuration="4.06523025s" podCreationTimestamp="2025-11-26 07:08:27 +0000 UTC" firstStartedPulling="2025-11-26 07:08:29.024612566 +0000 UTC m=+810.544754225" lastFinishedPulling="2025-11-26 07:08:30.440917665 +0000 UTC m=+811.961059284" observedRunningTime="2025-11-26 07:08:31.063583298 +0000 UTC m=+812.583724927" watchObservedRunningTime="2025-11-26 07:08:31.06523025 +0000 UTC m=+812.585371869" Nov 26 07:08:37 crc kubenswrapper[4940]: I1126 07:08:37.956512 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:37 crc kubenswrapper[4940]: I1126 07:08:37.957128 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:38 crc kubenswrapper[4940]: I1126 07:08:38.014324 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:38 crc kubenswrapper[4940]: I1126 07:08:38.121473 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.644394 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2"] Nov 26 07:08:39 crc kubenswrapper[4940]: E1126 07:08:39.644863 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerName="util" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.644878 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerName="util" Nov 26 07:08:39 crc kubenswrapper[4940]: E1126 07:08:39.644894 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerName="extract" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.644903 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerName="extract" Nov 26 07:08:39 crc kubenswrapper[4940]: E1126 07:08:39.644918 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerName="pull" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.644927 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerName="pull" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.645076 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a177e713-0abb-486a-bd7f-cc80ea4a762e" containerName="extract" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.645980 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.648300 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-qf88j" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.648556 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.649013 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.649073 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.650001 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.669435 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2"] Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.726569 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4415b953-7e66-4d84-acde-32474c6d0ebf-webhook-cert\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.726622 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2hx6\" (UniqueName: \"kubernetes.io/projected/4415b953-7e66-4d84-acde-32474c6d0ebf-kube-api-access-d2hx6\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.726668 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4415b953-7e66-4d84-acde-32474c6d0ebf-apiservice-cert\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.827592 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4415b953-7e66-4d84-acde-32474c6d0ebf-apiservice-cert\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.827686 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4415b953-7e66-4d84-acde-32474c6d0ebf-webhook-cert\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.827715 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2hx6\" (UniqueName: \"kubernetes.io/projected/4415b953-7e66-4d84-acde-32474c6d0ebf-kube-api-access-d2hx6\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.833630 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4415b953-7e66-4d84-acde-32474c6d0ebf-webhook-cert\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.838094 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4415b953-7e66-4d84-acde-32474c6d0ebf-apiservice-cert\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.843069 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2hx6\" (UniqueName: \"kubernetes.io/projected/4415b953-7e66-4d84-acde-32474c6d0ebf-kube-api-access-d2hx6\") pod \"metallb-operator-controller-manager-6f8d6cc986-tlmk2\" (UID: \"4415b953-7e66-4d84-acde-32474c6d0ebf\") " pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.870975 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs"] Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.872069 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.873922 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-55fx6" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.874186 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.874220 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.894709 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs"] Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.929486 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9238138e-b446-46e2-81b3-802bacc8e544-webhook-cert\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.929552 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9238138e-b446-46e2-81b3-802bacc8e544-apiservice-cert\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.929577 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kk6sg\" (UniqueName: \"kubernetes.io/projected/9238138e-b446-46e2-81b3-802bacc8e544-kube-api-access-kk6sg\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:39 crc kubenswrapper[4940]: I1126 07:08:39.960394 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.031763 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9238138e-b446-46e2-81b3-802bacc8e544-webhook-cert\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.031895 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9238138e-b446-46e2-81b3-802bacc8e544-apiservice-cert\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.032823 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kk6sg\" (UniqueName: \"kubernetes.io/projected/9238138e-b446-46e2-81b3-802bacc8e544-kube-api-access-kk6sg\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.036639 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9238138e-b446-46e2-81b3-802bacc8e544-webhook-cert\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.036639 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9238138e-b446-46e2-81b3-802bacc8e544-apiservice-cert\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.055257 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kk6sg\" (UniqueName: \"kubernetes.io/projected/9238138e-b446-46e2-81b3-802bacc8e544-kube-api-access-kk6sg\") pod \"metallb-operator-webhook-server-57fd5d9966-flsrs\" (UID: \"9238138e-b446-46e2-81b3-802bacc8e544\") " pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.195823 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.252681 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2"] Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.423792 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qdl7x"] Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.424106 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qdl7x" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerName="registry-server" containerID="cri-o://7c5e1c1010480521c09b134a4b00229e5699ce155a6e6e35f193d2e3803ca562" gracePeriod=2 Nov 26 07:08:40 crc kubenswrapper[4940]: I1126 07:08:40.552908 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs"] Nov 26 07:08:40 crc kubenswrapper[4940]: W1126 07:08:40.557737 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9238138e_b446_46e2_81b3_802bacc8e544.slice/crio-9416f49b354f987e5014f2e3e7f3d721156d3551f795409d13677249c9132c70 WatchSource:0}: Error finding container 9416f49b354f987e5014f2e3e7f3d721156d3551f795409d13677249c9132c70: Status 404 returned error can't find the container with id 9416f49b354f987e5014f2e3e7f3d721156d3551f795409d13677249c9132c70 Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.108271 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" event={"ID":"4415b953-7e66-4d84-acde-32474c6d0ebf","Type":"ContainerStarted","Data":"505daeef29c76fce3e064622a117b1b68582964bcbb37cf1a8d2833698c5a966"} Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.111073 4940 generic.go:334] "Generic (PLEG): container finished" podID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerID="7c5e1c1010480521c09b134a4b00229e5699ce155a6e6e35f193d2e3803ca562" exitCode=0 Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.111142 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdl7x" event={"ID":"0a96e1e2-91d3-413c-92c3-7c83e9f911ca","Type":"ContainerDied","Data":"7c5e1c1010480521c09b134a4b00229e5699ce155a6e6e35f193d2e3803ca562"} Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.112181 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" event={"ID":"9238138e-b446-46e2-81b3-802bacc8e544","Type":"ContainerStarted","Data":"9416f49b354f987e5014f2e3e7f3d721156d3551f795409d13677249c9132c70"} Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.410933 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.568474 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-utilities\") pod \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.568554 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-catalog-content\") pod \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.568592 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wlk6\" (UniqueName: \"kubernetes.io/projected/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-kube-api-access-2wlk6\") pod \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\" (UID: \"0a96e1e2-91d3-413c-92c3-7c83e9f911ca\") " Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.569405 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-utilities" (OuterVolumeSpecName: "utilities") pod "0a96e1e2-91d3-413c-92c3-7c83e9f911ca" (UID: "0a96e1e2-91d3-413c-92c3-7c83e9f911ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.577632 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-kube-api-access-2wlk6" (OuterVolumeSpecName: "kube-api-access-2wlk6") pod "0a96e1e2-91d3-413c-92c3-7c83e9f911ca" (UID: "0a96e1e2-91d3-413c-92c3-7c83e9f911ca"). InnerVolumeSpecName "kube-api-access-2wlk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.628346 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a96e1e2-91d3-413c-92c3-7c83e9f911ca" (UID: "0a96e1e2-91d3-413c-92c3-7c83e9f911ca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.669886 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.669922 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:41 crc kubenswrapper[4940]: I1126 07:08:41.669935 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wlk6\" (UniqueName: \"kubernetes.io/projected/0a96e1e2-91d3-413c-92c3-7c83e9f911ca-kube-api-access-2wlk6\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:42 crc kubenswrapper[4940]: I1126 07:08:42.134788 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qdl7x" event={"ID":"0a96e1e2-91d3-413c-92c3-7c83e9f911ca","Type":"ContainerDied","Data":"5ba96d1a66177c6123ab5c973b1d71f5ba02101ee2edfb2192bb595348de9e8e"} Nov 26 07:08:42 crc kubenswrapper[4940]: I1126 07:08:42.134855 4940 scope.go:117] "RemoveContainer" containerID="7c5e1c1010480521c09b134a4b00229e5699ce155a6e6e35f193d2e3803ca562" Nov 26 07:08:42 crc kubenswrapper[4940]: I1126 07:08:42.134880 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qdl7x" Nov 26 07:08:42 crc kubenswrapper[4940]: I1126 07:08:42.149199 4940 scope.go:117] "RemoveContainer" containerID="d0c3d1526906f22da54e3043cb5cb368dc221eed2556cfe88806de4635f30cca" Nov 26 07:08:42 crc kubenswrapper[4940]: I1126 07:08:42.169479 4940 scope.go:117] "RemoveContainer" containerID="39f20e12623f34e5a38a951f194c0380a0d7e303ac86ea550b004140e544c2d6" Nov 26 07:08:42 crc kubenswrapper[4940]: I1126 07:08:42.183622 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qdl7x"] Nov 26 07:08:42 crc kubenswrapper[4940]: I1126 07:08:42.192290 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qdl7x"] Nov 26 07:08:43 crc kubenswrapper[4940]: I1126 07:08:43.180958 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" path="/var/lib/kubelet/pods/0a96e1e2-91d3-413c-92c3-7c83e9f911ca/volumes" Nov 26 07:08:46 crc kubenswrapper[4940]: I1126 07:08:46.160704 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" event={"ID":"4415b953-7e66-4d84-acde-32474c6d0ebf","Type":"ContainerStarted","Data":"48e7311a33f43c5297b58b8f4ae26329f15419cde8fa47af386f41bebc35df2a"} Nov 26 07:08:46 crc kubenswrapper[4940]: I1126 07:08:46.161794 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:08:46 crc kubenswrapper[4940]: I1126 07:08:46.163602 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" event={"ID":"9238138e-b446-46e2-81b3-802bacc8e544","Type":"ContainerStarted","Data":"61b591b4fac8ada8ae88345e969773df4d703a3a7ef0e5f4979ded7931c5c15f"} Nov 26 07:08:46 crc kubenswrapper[4940]: I1126 07:08:46.164016 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:08:46 crc kubenswrapper[4940]: I1126 07:08:46.185750 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" podStartSLOduration=2.350017267 podStartE2EDuration="7.185732622s" podCreationTimestamp="2025-11-26 07:08:39 +0000 UTC" firstStartedPulling="2025-11-26 07:08:40.303086758 +0000 UTC m=+821.823228367" lastFinishedPulling="2025-11-26 07:08:45.138802103 +0000 UTC m=+826.658943722" observedRunningTime="2025-11-26 07:08:46.183065968 +0000 UTC m=+827.703207587" watchObservedRunningTime="2025-11-26 07:08:46.185732622 +0000 UTC m=+827.705874241" Nov 26 07:08:46 crc kubenswrapper[4940]: I1126 07:08:46.215081 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" podStartSLOduration=2.62084226 podStartE2EDuration="7.215025382s" podCreationTimestamp="2025-11-26 07:08:39 +0000 UTC" firstStartedPulling="2025-11-26 07:08:40.560265872 +0000 UTC m=+822.080407491" lastFinishedPulling="2025-11-26 07:08:45.154448994 +0000 UTC m=+826.674590613" observedRunningTime="2025-11-26 07:08:46.207758324 +0000 UTC m=+827.727899953" watchObservedRunningTime="2025-11-26 07:08:46.215025382 +0000 UTC m=+827.735167051" Nov 26 07:09:00 crc kubenswrapper[4940]: I1126 07:09:00.200265 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-57fd5d9966-flsrs" Nov 26 07:09:19 crc kubenswrapper[4940]: I1126 07:09:19.966067 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.610103 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-jxkgj"] Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.610879 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerName="extract-utilities" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.610902 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerName="extract-utilities" Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.610942 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerName="registry-server" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.610952 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerName="registry-server" Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.610964 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerName="extract-content" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.610977 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerName="extract-content" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.611284 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a96e1e2-91d3-413c-92c3-7c83e9f911ca" containerName="registry-server" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.619630 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c"] Nov 26 07:09:20 crc kubenswrapper[4940]: 
I1126 07:09:20.619894 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.621038 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.626883 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-wf8qf" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.627250 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.633084 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.633343 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.637055 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c"] Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.705380 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-4rnjg"] Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.706464 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.708756 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.709105 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-77wsc" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.709241 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.712144 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-lphz8"] Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.713032 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.713701 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.714459 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.724653 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-lphz8"] Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765448 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbgm4\" (UniqueName: \"kubernetes.io/projected/1679bb4d-d17d-4e92-b72a-55e5927bedd6-kube-api-access-sbgm4\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765488 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8c4f4ea4-d726-4477-a94e-d23464e00e6a-metrics-certs\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765509 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09a94cb0-e4d0-4945-9f9e-9912f64ee105-cert\") pod \"frr-k8s-webhook-server-6998585d5-lwp6c\" (UID: \"09a94cb0-e4d0-4945-9f9e-9912f64ee105\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765533 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-metrics-certs\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765549 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-reloader\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765721 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765796 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fqn2\" (UniqueName: \"kubernetes.io/projected/8c4f4ea4-d726-4477-a94e-d23464e00e6a-kube-api-access-2fqn2\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765851 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: 
\"kubernetes.io/configmap/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-startup\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765901 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-sockets\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765930 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-cert\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.765948 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-conf\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.766009 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn8vm\" (UniqueName: \"kubernetes.io/projected/faeaf9d9-6067-4ecd-b240-4909087180dc-kube-api-access-xn8vm\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.766056 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metallb-excludel2\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.766080 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metrics-certs\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.766114 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c65sg\" (UniqueName: \"kubernetes.io/projected/09a94cb0-e4d0-4945-9f9e-9912f64ee105-kube-api-access-c65sg\") pod \"frr-k8s-webhook-server-6998585d5-lwp6c\" (UID: \"09a94cb0-e4d0-4945-9f9e-9912f64ee105\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.766222 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-metrics\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867170 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: 
\"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-sockets\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867227 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-cert\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867247 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-conf\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867271 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn8vm\" (UniqueName: \"kubernetes.io/projected/faeaf9d9-6067-4ecd-b240-4909087180dc-kube-api-access-xn8vm\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867298 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metallb-excludel2\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867318 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metrics-certs\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867342 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c65sg\" (UniqueName: \"kubernetes.io/projected/09a94cb0-e4d0-4945-9f9e-9912f64ee105-kube-api-access-c65sg\") pod \"frr-k8s-webhook-server-6998585d5-lwp6c\" (UID: \"09a94cb0-e4d0-4945-9f9e-9912f64ee105\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867401 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-metrics\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867431 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbgm4\" (UniqueName: \"kubernetes.io/projected/1679bb4d-d17d-4e92-b72a-55e5927bedd6-kube-api-access-sbgm4\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867448 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8c4f4ea4-d726-4477-a94e-d23464e00e6a-metrics-certs\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " 
pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867470 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09a94cb0-e4d0-4945-9f9e-9912f64ee105-cert\") pod \"frr-k8s-webhook-server-6998585d5-lwp6c\" (UID: \"09a94cb0-e4d0-4945-9f9e-9912f64ee105\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867502 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-metrics-certs\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867524 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-reloader\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867562 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867596 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fqn2\" (UniqueName: \"kubernetes.io/projected/8c4f4ea4-d726-4477-a94e-d23464e00e6a-kube-api-access-2fqn2\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.867620 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-startup\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.868625 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-startup\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.868886 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-sockets\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.869214 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-frr-conf\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.869990 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metallb-excludel2\") 
pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.870080 4940 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.870123 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metrics-certs podName:1679bb4d-d17d-4e92-b72a-55e5927bedd6 nodeName:}" failed. No retries permitted until 2025-11-26 07:09:21.370108086 +0000 UTC m=+862.890249705 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metrics-certs") pod "speaker-4rnjg" (UID: "1679bb4d-d17d-4e92-b72a-55e5927bedd6") : secret "speaker-certs-secret" not found Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.870572 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-metrics\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.871583 4940 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.871630 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist podName:1679bb4d-d17d-4e92-b72a-55e5927bedd6 nodeName:}" failed. No retries permitted until 2025-11-26 07:09:21.371615183 +0000 UTC m=+862.891756802 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist") pod "speaker-4rnjg" (UID: "1679bb4d-d17d-4e92-b72a-55e5927bedd6") : secret "metallb-memberlist" not found Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.871673 4940 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 26 07:09:20 crc kubenswrapper[4940]: E1126 07:09:20.871696 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-metrics-certs podName:faeaf9d9-6067-4ecd-b240-4909087180dc nodeName:}" failed. No retries permitted until 2025-11-26 07:09:21.371688626 +0000 UTC m=+862.891830255 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-metrics-certs") pod "controller-6c7b4b5f48-lphz8" (UID: "faeaf9d9-6067-4ecd-b240-4909087180dc") : secret "controller-certs-secret" not found Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.871892 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/8c4f4ea4-d726-4477-a94e-d23464e00e6a-reloader\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.881977 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09a94cb0-e4d0-4945-9f9e-9912f64ee105-cert\") pod \"frr-k8s-webhook-server-6998585d5-lwp6c\" (UID: \"09a94cb0-e4d0-4945-9f9e-9912f64ee105\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.886607 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8c4f4ea4-d726-4477-a94e-d23464e00e6a-metrics-certs\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.886926 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.904598 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fqn2\" (UniqueName: \"kubernetes.io/projected/8c4f4ea4-d726-4477-a94e-d23464e00e6a-kube-api-access-2fqn2\") pod \"frr-k8s-jxkgj\" (UID: \"8c4f4ea4-d726-4477-a94e-d23464e00e6a\") " pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.915822 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c65sg\" (UniqueName: \"kubernetes.io/projected/09a94cb0-e4d0-4945-9f9e-9912f64ee105-kube-api-access-c65sg\") pod \"frr-k8s-webhook-server-6998585d5-lwp6c\" (UID: \"09a94cb0-e4d0-4945-9f9e-9912f64ee105\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.925704 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbgm4\" (UniqueName: \"kubernetes.io/projected/1679bb4d-d17d-4e92-b72a-55e5927bedd6-kube-api-access-sbgm4\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.926426 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-cert\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.930308 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn8vm\" (UniqueName: \"kubernetes.io/projected/faeaf9d9-6067-4ecd-b240-4909087180dc-kube-api-access-xn8vm\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.955275 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:20 crc kubenswrapper[4940]: I1126 07:09:20.968397 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:21 crc kubenswrapper[4940]: I1126 07:09:21.374513 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metrics-certs\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:21 crc kubenswrapper[4940]: I1126 07:09:21.374991 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-metrics-certs\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:21 crc kubenswrapper[4940]: I1126 07:09:21.375146 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:21 crc kubenswrapper[4940]: E1126 07:09:21.375288 4940 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 07:09:21 crc kubenswrapper[4940]: E1126 07:09:21.375354 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist podName:1679bb4d-d17d-4e92-b72a-55e5927bedd6 nodeName:}" failed. No retries permitted until 2025-11-26 07:09:22.375333598 +0000 UTC m=+863.895475217 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist") pod "speaker-4rnjg" (UID: "1679bb4d-d17d-4e92-b72a-55e5927bedd6") : secret "metallb-memberlist" not found Nov 26 07:09:21 crc kubenswrapper[4940]: I1126 07:09:21.381701 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/faeaf9d9-6067-4ecd-b240-4909087180dc-metrics-certs\") pod \"controller-6c7b4b5f48-lphz8\" (UID: \"faeaf9d9-6067-4ecd-b240-4909087180dc\") " pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:21 crc kubenswrapper[4940]: I1126 07:09:21.381701 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-metrics-certs\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:21 crc kubenswrapper[4940]: I1126 07:09:21.382717 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c"] Nov 26 07:09:21 crc kubenswrapper[4940]: I1126 07:09:21.388047 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerStarted","Data":"6c6cbddd8510f949e53617c6602021cefb12b9dc12e1594d4c4bd4254f82a16e"} Nov 26 07:09:21 crc kubenswrapper[4940]: W1126 07:09:21.388470 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09a94cb0_e4d0_4945_9f9e_9912f64ee105.slice/crio-8d1303d83206bb34f360cb205a108b979db574184ef1462ae35932f6305b6e1e WatchSource:0}: Error finding container 8d1303d83206bb34f360cb205a108b979db574184ef1462ae35932f6305b6e1e: Status 404 returned error can't find the container with id 8d1303d83206bb34f360cb205a108b979db574184ef1462ae35932f6305b6e1e Nov 26 07:09:21 crc kubenswrapper[4940]: I1126 07:09:21.627445 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.005413 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-lphz8"] Nov 26 07:09:22 crc kubenswrapper[4940]: W1126 07:09:22.017612 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfaeaf9d9_6067_4ecd_b240_4909087180dc.slice/crio-9b3493e85c53a034e5363de64e2799a98105a978034d0ad3ac26d411e271541a WatchSource:0}: Error finding container 9b3493e85c53a034e5363de64e2799a98105a978034d0ad3ac26d411e271541a: Status 404 returned error can't find the container with id 9b3493e85c53a034e5363de64e2799a98105a978034d0ad3ac26d411e271541a Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.388872 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.395270 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-lphz8" event={"ID":"faeaf9d9-6067-4ecd-b240-4909087180dc","Type":"ContainerStarted","Data":"4cbc6e848a4926383304595b6865d5dc6055eb29bed61bed135d335622a224b1"} Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.395308 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-lphz8" event={"ID":"faeaf9d9-6067-4ecd-b240-4909087180dc","Type":"ContainerStarted","Data":"103408c277b2c9622d6a55d19d3ce1e2906e8e133822544b7b53f7e3447273d4"} Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.395318 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-lphz8" event={"ID":"faeaf9d9-6067-4ecd-b240-4909087180dc","Type":"ContainerStarted","Data":"9b3493e85c53a034e5363de64e2799a98105a978034d0ad3ac26d411e271541a"} Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.396024 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.396930 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" event={"ID":"09a94cb0-e4d0-4945-9f9e-9912f64ee105","Type":"ContainerStarted","Data":"8d1303d83206bb34f360cb205a108b979db574184ef1462ae35932f6305b6e1e"} Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.404361 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1679bb4d-d17d-4e92-b72a-55e5927bedd6-memberlist\") pod \"speaker-4rnjg\" (UID: \"1679bb4d-d17d-4e92-b72a-55e5927bedd6\") " pod="metallb-system/speaker-4rnjg" Nov 26 07:09:22 crc kubenswrapper[4940]: I1126 07:09:22.416384 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-lphz8" podStartSLOduration=2.416363075 podStartE2EDuration="2.416363075s" podCreationTimestamp="2025-11-26 07:09:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:09:22.413208466 +0000 UTC m=+863.933350085" watchObservedRunningTime="2025-11-26 07:09:22.416363075 +0000 UTC m=+863.936504694" Nov 26 07:09:22 crc 
kubenswrapper[4940]: I1126 07:09:22.544603 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-4rnjg" Nov 26 07:09:23 crc kubenswrapper[4940]: I1126 07:09:23.418831 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-4rnjg" event={"ID":"1679bb4d-d17d-4e92-b72a-55e5927bedd6","Type":"ContainerStarted","Data":"c5e6b85d4842dd9f9118236201f3e7fdc5757a79f9f3906ed0c15b8af317e1ca"} Nov 26 07:09:23 crc kubenswrapper[4940]: I1126 07:09:23.419151 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-4rnjg" event={"ID":"1679bb4d-d17d-4e92-b72a-55e5927bedd6","Type":"ContainerStarted","Data":"3843b6acd6d2b5a0254ada7f70fe6f0aef43aa7231eea7c5a098a6000cb1fe71"} Nov 26 07:09:23 crc kubenswrapper[4940]: I1126 07:09:23.419164 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-4rnjg" event={"ID":"1679bb4d-d17d-4e92-b72a-55e5927bedd6","Type":"ContainerStarted","Data":"440df0b1a62ff004be09995006c5ada5c331156b7f8fd9998d943f415632664b"} Nov 26 07:09:23 crc kubenswrapper[4940]: I1126 07:09:23.419380 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-4rnjg" Nov 26 07:09:23 crc kubenswrapper[4940]: I1126 07:09:23.440144 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-4rnjg" podStartSLOduration=3.44012019 podStartE2EDuration="3.44012019s" podCreationTimestamp="2025-11-26 07:09:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:09:23.438713655 +0000 UTC m=+864.958855274" watchObservedRunningTime="2025-11-26 07:09:23.44012019 +0000 UTC m=+864.960261809" Nov 26 07:09:28 crc kubenswrapper[4940]: I1126 07:09:28.448808 4940 generic.go:334] "Generic (PLEG): container finished" podID="8c4f4ea4-d726-4477-a94e-d23464e00e6a" containerID="b2838a49a9009e14c798055c912eecd5dc3feeccd8ba6268934e0d20c2c0bc3b" exitCode=0 Nov 26 07:09:28 crc kubenswrapper[4940]: I1126 07:09:28.448867 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerDied","Data":"b2838a49a9009e14c798055c912eecd5dc3feeccd8ba6268934e0d20c2c0bc3b"} Nov 26 07:09:28 crc kubenswrapper[4940]: I1126 07:09:28.453463 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" event={"ID":"09a94cb0-e4d0-4945-9f9e-9912f64ee105","Type":"ContainerStarted","Data":"348f7337ebe97a601b36ddbf51b781e45a34d8d4fd3b9d2e88e40a77ffb5ce8c"} Nov 26 07:09:28 crc kubenswrapper[4940]: I1126 07:09:28.453658 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:28 crc kubenswrapper[4940]: I1126 07:09:28.536632 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" podStartSLOduration=2.482369802 podStartE2EDuration="8.536609445s" podCreationTimestamp="2025-11-26 07:09:20 +0000 UTC" firstStartedPulling="2025-11-26 07:09:21.391548708 +0000 UTC m=+862.911690327" lastFinishedPulling="2025-11-26 07:09:27.445788351 +0000 UTC m=+868.965929970" observedRunningTime="2025-11-26 07:09:28.529293876 +0000 UTC m=+870.049435495" watchObservedRunningTime="2025-11-26 07:09:28.536609445 +0000 UTC m=+870.056751064" Nov 26 07:09:29 crc kubenswrapper[4940]: I1126 
07:09:29.464656 4940 generic.go:334] "Generic (PLEG): container finished" podID="8c4f4ea4-d726-4477-a94e-d23464e00e6a" containerID="e9159294b42f09a7af7034575557ed4a71395b7543f624188942b80107ee2136" exitCode=0 Nov 26 07:09:29 crc kubenswrapper[4940]: I1126 07:09:29.464720 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerDied","Data":"e9159294b42f09a7af7034575557ed4a71395b7543f624188942b80107ee2136"} Nov 26 07:09:30 crc kubenswrapper[4940]: I1126 07:09:30.473216 4940 generic.go:334] "Generic (PLEG): container finished" podID="8c4f4ea4-d726-4477-a94e-d23464e00e6a" containerID="2a8c7386f640b2c4c93195171f3d7b6a68b0faa7e283e1bdd2b333969cf831d2" exitCode=0 Nov 26 07:09:30 crc kubenswrapper[4940]: I1126 07:09:30.473288 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerDied","Data":"2a8c7386f640b2c4c93195171f3d7b6a68b0faa7e283e1bdd2b333969cf831d2"} Nov 26 07:09:31 crc kubenswrapper[4940]: I1126 07:09:31.485370 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerStarted","Data":"9759af7ee59b792d92891e58fad7f6157c162a694f0e134545ec62e1b5a3fb4c"} Nov 26 07:09:31 crc kubenswrapper[4940]: I1126 07:09:31.485809 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:31 crc kubenswrapper[4940]: I1126 07:09:31.485825 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerStarted","Data":"87a7d4442dd6dc639e15d052e2bfd7dbc4f2e4fce6b7e569a32d5510b0889cf9"} Nov 26 07:09:31 crc kubenswrapper[4940]: I1126 07:09:31.485838 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerStarted","Data":"97359526b475bd9c00ec128300cb8e91da9b6311bdeb2f9e4ec7032755220ef0"} Nov 26 07:09:31 crc kubenswrapper[4940]: I1126 07:09:31.485849 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerStarted","Data":"98d7c6c09b9f09bd8c509a5e5f3d07a7fc0922d6235dd8781d9c245c482fd65b"} Nov 26 07:09:31 crc kubenswrapper[4940]: I1126 07:09:31.485859 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerStarted","Data":"a714d736c56bdfe0dd47d1ba6b34447f65ae2413f3dbc937bd623a297ce260ac"} Nov 26 07:09:31 crc kubenswrapper[4940]: I1126 07:09:31.485871 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jxkgj" event={"ID":"8c4f4ea4-d726-4477-a94e-d23464e00e6a","Type":"ContainerStarted","Data":"4a93a93c510988382a08ce0386b40d0581e4993aa5f286c41b31828376e54079"} Nov 26 07:09:31 crc kubenswrapper[4940]: I1126 07:09:31.508583 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-jxkgj" podStartSLOduration=5.237449041 podStartE2EDuration="11.50855518s" podCreationTimestamp="2025-11-26 07:09:20 +0000 UTC" firstStartedPulling="2025-11-26 07:09:21.159651239 +0000 UTC m=+862.679792858" lastFinishedPulling="2025-11-26 07:09:27.430757378 +0000 UTC m=+868.950898997" observedRunningTime="2025-11-26 
07:09:31.507503317 +0000 UTC m=+873.027644936" watchObservedRunningTime="2025-11-26 07:09:31.50855518 +0000 UTC m=+873.028696839" Nov 26 07:09:32 crc kubenswrapper[4940]: I1126 07:09:32.551649 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-4rnjg" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.555771 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m"] Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.556827 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.558652 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.568479 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m"] Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.666651 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nl565\" (UniqueName: \"kubernetes.io/projected/737d785a-247d-4d8f-ba8d-d2488bf741c3-kube-api-access-nl565\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.666782 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.666907 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.768157 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nl565\" (UniqueName: \"kubernetes.io/projected/737d785a-247d-4d8f-ba8d-d2488bf741c3-kube-api-access-nl565\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.768215 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc 
kubenswrapper[4940]: I1126 07:09:34.768265 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.768832 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.768939 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.790641 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nl565\" (UniqueName: \"kubernetes.io/projected/737d785a-247d-4d8f-ba8d-d2488bf741c3-kube-api-access-nl565\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:34 crc kubenswrapper[4940]: I1126 07:09:34.876505 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:35 crc kubenswrapper[4940]: I1126 07:09:35.266663 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m"] Nov 26 07:09:35 crc kubenswrapper[4940]: W1126 07:09:35.271767 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod737d785a_247d_4d8f_ba8d_d2488bf741c3.slice/crio-089ea9b2d04b247e5a37bcea6e5c6f97c99e974e62db38bf43947f7b953861c4 WatchSource:0}: Error finding container 089ea9b2d04b247e5a37bcea6e5c6f97c99e974e62db38bf43947f7b953861c4: Status 404 returned error can't find the container with id 089ea9b2d04b247e5a37bcea6e5c6f97c99e974e62db38bf43947f7b953861c4 Nov 26 07:09:35 crc kubenswrapper[4940]: I1126 07:09:35.511503 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" event={"ID":"737d785a-247d-4d8f-ba8d-d2488bf741c3","Type":"ContainerStarted","Data":"8fea26b5da1cf14a3312cb411a3512ca16204dc6db0622746474d34d7dd70efc"} Nov 26 07:09:35 crc kubenswrapper[4940]: I1126 07:09:35.511546 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" event={"ID":"737d785a-247d-4d8f-ba8d-d2488bf741c3","Type":"ContainerStarted","Data":"089ea9b2d04b247e5a37bcea6e5c6f97c99e974e62db38bf43947f7b953861c4"} Nov 26 07:09:35 crc kubenswrapper[4940]: I1126 07:09:35.957381 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:36 crc kubenswrapper[4940]: I1126 07:09:36.007175 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:36 crc kubenswrapper[4940]: I1126 07:09:36.519318 4940 generic.go:334] "Generic (PLEG): container finished" podID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerID="8fea26b5da1cf14a3312cb411a3512ca16204dc6db0622746474d34d7dd70efc" exitCode=0 Nov 26 07:09:36 crc kubenswrapper[4940]: I1126 07:09:36.519382 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" event={"ID":"737d785a-247d-4d8f-ba8d-d2488bf741c3","Type":"ContainerDied","Data":"8fea26b5da1cf14a3312cb411a3512ca16204dc6db0622746474d34d7dd70efc"} Nov 26 07:09:40 crc kubenswrapper[4940]: I1126 07:09:40.544358 4940 generic.go:334] "Generic (PLEG): container finished" podID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerID="83412f55ba0147b4d31a55c7fc78cdebfc649ac11e1ed8f53eede7236ff761bf" exitCode=0 Nov 26 07:09:40 crc kubenswrapper[4940]: I1126 07:09:40.544441 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" event={"ID":"737d785a-247d-4d8f-ba8d-d2488bf741c3","Type":"ContainerDied","Data":"83412f55ba0147b4d31a55c7fc78cdebfc649ac11e1ed8f53eede7236ff761bf"} Nov 26 07:09:40 crc kubenswrapper[4940]: I1126 07:09:40.959415 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-jxkgj" Nov 26 07:09:40 crc kubenswrapper[4940]: I1126 07:09:40.974197 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-lwp6c" Nov 26 07:09:41 
crc kubenswrapper[4940]: I1126 07:09:41.552559 4940 generic.go:334] "Generic (PLEG): container finished" podID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerID="8ff9a52e560409f4cf43fd30db327bc88b4b60ce5aeaf65db7e06697cd594a98" exitCode=0 Nov 26 07:09:41 crc kubenswrapper[4940]: I1126 07:09:41.552626 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" event={"ID":"737d785a-247d-4d8f-ba8d-d2488bf741c3","Type":"ContainerDied","Data":"8ff9a52e560409f4cf43fd30db327bc88b4b60ce5aeaf65db7e06697cd594a98"} Nov 26 07:09:41 crc kubenswrapper[4940]: I1126 07:09:41.631911 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-lphz8" Nov 26 07:09:42 crc kubenswrapper[4940]: I1126 07:09:42.830222 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:42 crc kubenswrapper[4940]: I1126 07:09:42.974529 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nl565\" (UniqueName: \"kubernetes.io/projected/737d785a-247d-4d8f-ba8d-d2488bf741c3-kube-api-access-nl565\") pod \"737d785a-247d-4d8f-ba8d-d2488bf741c3\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " Nov 26 07:09:42 crc kubenswrapper[4940]: I1126 07:09:42.974638 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-util\") pod \"737d785a-247d-4d8f-ba8d-d2488bf741c3\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " Nov 26 07:09:42 crc kubenswrapper[4940]: I1126 07:09:42.974677 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-bundle\") pod \"737d785a-247d-4d8f-ba8d-d2488bf741c3\" (UID: \"737d785a-247d-4d8f-ba8d-d2488bf741c3\") " Nov 26 07:09:42 crc kubenswrapper[4940]: I1126 07:09:42.975705 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-bundle" (OuterVolumeSpecName: "bundle") pod "737d785a-247d-4d8f-ba8d-d2488bf741c3" (UID: "737d785a-247d-4d8f-ba8d-d2488bf741c3"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:42 crc kubenswrapper[4940]: I1126 07:09:42.979440 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/737d785a-247d-4d8f-ba8d-d2488bf741c3-kube-api-access-nl565" (OuterVolumeSpecName: "kube-api-access-nl565") pod "737d785a-247d-4d8f-ba8d-d2488bf741c3" (UID: "737d785a-247d-4d8f-ba8d-d2488bf741c3"). InnerVolumeSpecName "kube-api-access-nl565". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:42 crc kubenswrapper[4940]: I1126 07:09:42.986697 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-util" (OuterVolumeSpecName: "util") pod "737d785a-247d-4d8f-ba8d-d2488bf741c3" (UID: "737d785a-247d-4d8f-ba8d-d2488bf741c3"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:43 crc kubenswrapper[4940]: I1126 07:09:43.076026 4940 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:43 crc kubenswrapper[4940]: I1126 07:09:43.076087 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nl565\" (UniqueName: \"kubernetes.io/projected/737d785a-247d-4d8f-ba8d-d2488bf741c3-kube-api-access-nl565\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:43 crc kubenswrapper[4940]: I1126 07:09:43.076102 4940 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/737d785a-247d-4d8f-ba8d-d2488bf741c3-util\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:43 crc kubenswrapper[4940]: I1126 07:09:43.567638 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" event={"ID":"737d785a-247d-4d8f-ba8d-d2488bf741c3","Type":"ContainerDied","Data":"089ea9b2d04b247e5a37bcea6e5c6f97c99e974e62db38bf43947f7b953861c4"} Nov 26 07:09:43 crc kubenswrapper[4940]: I1126 07:09:43.567686 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="089ea9b2d04b247e5a37bcea6e5c6f97c99e974e62db38bf43947f7b953861c4" Nov 26 07:09:43 crc kubenswrapper[4940]: I1126 07:09:43.567687 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.849431 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7"] Nov 26 07:09:47 crc kubenswrapper[4940]: E1126 07:09:47.850391 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerName="util" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.850416 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerName="util" Nov 26 07:09:47 crc kubenswrapper[4940]: E1126 07:09:47.850450 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerName="pull" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.850462 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerName="pull" Nov 26 07:09:47 crc kubenswrapper[4940]: E1126 07:09:47.850481 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerName="extract" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.850491 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerName="extract" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.850667 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="737d785a-247d-4d8f-ba8d-d2488bf741c3" containerName="extract" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.851312 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.856870 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.857101 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.857280 4940 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-vgvsv" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.881724 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7"] Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.934307 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l46w\" (UniqueName: \"kubernetes.io/projected/0c74a263-a393-4e68-9dd2-3f209cef5d67-kube-api-access-9l46w\") pod \"cert-manager-operator-controller-manager-64cf6dff88-wjbv7\" (UID: \"0c74a263-a393-4e68-9dd2-3f209cef5d67\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" Nov 26 07:09:47 crc kubenswrapper[4940]: I1126 07:09:47.934398 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0c74a263-a393-4e68-9dd2-3f209cef5d67-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-wjbv7\" (UID: \"0c74a263-a393-4e68-9dd2-3f209cef5d67\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" Nov 26 07:09:48 crc kubenswrapper[4940]: I1126 07:09:48.035976 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l46w\" (UniqueName: \"kubernetes.io/projected/0c74a263-a393-4e68-9dd2-3f209cef5d67-kube-api-access-9l46w\") pod \"cert-manager-operator-controller-manager-64cf6dff88-wjbv7\" (UID: \"0c74a263-a393-4e68-9dd2-3f209cef5d67\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" Nov 26 07:09:48 crc kubenswrapper[4940]: I1126 07:09:48.036322 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0c74a263-a393-4e68-9dd2-3f209cef5d67-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-wjbv7\" (UID: \"0c74a263-a393-4e68-9dd2-3f209cef5d67\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" Nov 26 07:09:48 crc kubenswrapper[4940]: I1126 07:09:48.036874 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0c74a263-a393-4e68-9dd2-3f209cef5d67-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-wjbv7\" (UID: \"0c74a263-a393-4e68-9dd2-3f209cef5d67\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" Nov 26 07:09:48 crc kubenswrapper[4940]: I1126 07:09:48.053482 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l46w\" (UniqueName: \"kubernetes.io/projected/0c74a263-a393-4e68-9dd2-3f209cef5d67-kube-api-access-9l46w\") pod \"cert-manager-operator-controller-manager-64cf6dff88-wjbv7\" (UID: \"0c74a263-a393-4e68-9dd2-3f209cef5d67\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" Nov 26 07:09:48 crc kubenswrapper[4940]: I1126 07:09:48.175701 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" Nov 26 07:09:48 crc kubenswrapper[4940]: I1126 07:09:48.609841 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7"] Nov 26 07:09:49 crc kubenswrapper[4940]: I1126 07:09:49.599825 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" event={"ID":"0c74a263-a393-4e68-9dd2-3f209cef5d67","Type":"ContainerStarted","Data":"e870ff8069dee14a436ac3b31378043010739b33853880867418b37e43234644"} Nov 26 07:09:55 crc kubenswrapper[4940]: I1126 07:09:55.639814 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" event={"ID":"0c74a263-a393-4e68-9dd2-3f209cef5d67","Type":"ContainerStarted","Data":"2374fee1f1b00c281924d679615d599ad36f0f43496c5b7ac2046fd36d83f469"} Nov 26 07:09:55 crc kubenswrapper[4940]: I1126 07:09:55.661292 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-wjbv7" podStartSLOduration=2.385918293 podStartE2EDuration="8.661272085s" podCreationTimestamp="2025-11-26 07:09:47 +0000 UTC" firstStartedPulling="2025-11-26 07:09:48.617998823 +0000 UTC m=+890.138140442" lastFinishedPulling="2025-11-26 07:09:54.893352615 +0000 UTC m=+896.413494234" observedRunningTime="2025-11-26 07:09:55.656627889 +0000 UTC m=+897.176769518" watchObservedRunningTime="2025-11-26 07:09:55.661272085 +0000 UTC m=+897.181413704" Nov 26 07:09:59 crc kubenswrapper[4940]: I1126 07:09:59.983569 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6"] Nov 26 07:09:59 crc kubenswrapper[4940]: I1126 07:09:59.984920 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" Nov 26 07:09:59 crc kubenswrapper[4940]: I1126 07:09:59.987508 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 26 07:09:59 crc kubenswrapper[4940]: I1126 07:09:59.987885 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 07:09:59 crc kubenswrapper[4940]: I1126 07:09:59.987918 4940 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-fr28m" Nov 26 07:09:59 crc kubenswrapper[4940]: I1126 07:09:59.998942 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6"] Nov 26 07:10:00 crc kubenswrapper[4940]: I1126 07:10:00.100628 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3632747e-e8d6-4971-a5bf-d07117d69ae7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-pjqx6\" (UID: \"3632747e-e8d6-4971-a5bf-d07117d69ae7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" Nov 26 07:10:00 crc kubenswrapper[4940]: I1126 07:10:00.100669 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wrcv\" (UniqueName: \"kubernetes.io/projected/3632747e-e8d6-4971-a5bf-d07117d69ae7-kube-api-access-5wrcv\") pod \"cert-manager-cainjector-855d9ccff4-pjqx6\" (UID: \"3632747e-e8d6-4971-a5bf-d07117d69ae7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" Nov 26 07:10:00 crc kubenswrapper[4940]: I1126 07:10:00.202653 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3632747e-e8d6-4971-a5bf-d07117d69ae7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-pjqx6\" (UID: \"3632747e-e8d6-4971-a5bf-d07117d69ae7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" Nov 26 07:10:00 crc kubenswrapper[4940]: I1126 07:10:00.202885 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wrcv\" (UniqueName: \"kubernetes.io/projected/3632747e-e8d6-4971-a5bf-d07117d69ae7-kube-api-access-5wrcv\") pod \"cert-manager-cainjector-855d9ccff4-pjqx6\" (UID: \"3632747e-e8d6-4971-a5bf-d07117d69ae7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" Nov 26 07:10:00 crc kubenswrapper[4940]: I1126 07:10:00.235563 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3632747e-e8d6-4971-a5bf-d07117d69ae7-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-pjqx6\" (UID: \"3632747e-e8d6-4971-a5bf-d07117d69ae7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" Nov 26 07:10:00 crc kubenswrapper[4940]: I1126 07:10:00.238580 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wrcv\" (UniqueName: \"kubernetes.io/projected/3632747e-e8d6-4971-a5bf-d07117d69ae7-kube-api-access-5wrcv\") pod \"cert-manager-cainjector-855d9ccff4-pjqx6\" (UID: \"3632747e-e8d6-4971-a5bf-d07117d69ae7\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" Nov 26 07:10:00 crc kubenswrapper[4940]: I1126 07:10:00.453751 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" Nov 26 07:10:00 crc kubenswrapper[4940]: I1126 07:10:00.865159 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6"] Nov 26 07:10:00 crc kubenswrapper[4940]: W1126 07:10:00.875710 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3632747e_e8d6_4971_a5bf_d07117d69ae7.slice/crio-e22a7d84f5e0566cbcf7cc0c31e8dfe288ea86f4f4bb06461349939a80474439 WatchSource:0}: Error finding container e22a7d84f5e0566cbcf7cc0c31e8dfe288ea86f4f4bb06461349939a80474439: Status 404 returned error can't find the container with id e22a7d84f5e0566cbcf7cc0c31e8dfe288ea86f4f4bb06461349939a80474439 Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.152477 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-7tbl5"] Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.153631 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.155652 4940 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-2zs7c" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.161948 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-7tbl5"] Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.163436 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-7tbl5\" (UID: \"1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.163498 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbqzg\" (UniqueName: \"kubernetes.io/projected/1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5-kube-api-access-tbqzg\") pod \"cert-manager-webhook-f4fb5df64-7tbl5\" (UID: \"1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.264928 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-7tbl5\" (UID: \"1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.265263 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbqzg\" (UniqueName: \"kubernetes.io/projected/1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5-kube-api-access-tbqzg\") pod \"cert-manager-webhook-f4fb5df64-7tbl5\" (UID: \"1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.288826 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-7tbl5\" (UID: 
\"1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.290939 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbqzg\" (UniqueName: \"kubernetes.io/projected/1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5-kube-api-access-tbqzg\") pod \"cert-manager-webhook-f4fb5df64-7tbl5\" (UID: \"1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.472341 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.670684 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" event={"ID":"3632747e-e8d6-4971-a5bf-d07117d69ae7","Type":"ContainerStarted","Data":"e22a7d84f5e0566cbcf7cc0c31e8dfe288ea86f4f4bb06461349939a80474439"} Nov 26 07:10:01 crc kubenswrapper[4940]: I1126 07:10:01.857064 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-7tbl5"] Nov 26 07:10:02 crc kubenswrapper[4940]: I1126 07:10:02.677423 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" event={"ID":"1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5","Type":"ContainerStarted","Data":"9705c08210f8ed865164fb8c8aedcba1581a1a2cdb4b36e8fb5844bc1ee5e7a5"} Nov 26 07:10:09 crc kubenswrapper[4940]: I1126 07:10:09.718941 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" event={"ID":"1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5","Type":"ContainerStarted","Data":"ff441ec1fc08879f8501dbd51aba84eb58967989bced97ed1b32e434070eb7cd"} Nov 26 07:10:09 crc kubenswrapper[4940]: I1126 07:10:09.719539 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:09 crc kubenswrapper[4940]: I1126 07:10:09.721176 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" event={"ID":"3632747e-e8d6-4971-a5bf-d07117d69ae7","Type":"ContainerStarted","Data":"9ac83e827985174b813db315c23840531ce688342e12957a0a195482eb3f37f3"} Nov 26 07:10:09 crc kubenswrapper[4940]: I1126 07:10:09.740671 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" podStartSLOduration=2.037130995 podStartE2EDuration="8.740653844s" podCreationTimestamp="2025-11-26 07:10:01 +0000 UTC" firstStartedPulling="2025-11-26 07:10:01.861923725 +0000 UTC m=+903.382065374" lastFinishedPulling="2025-11-26 07:10:08.565446604 +0000 UTC m=+910.085588223" observedRunningTime="2025-11-26 07:10:09.735125451 +0000 UTC m=+911.255267070" watchObservedRunningTime="2025-11-26 07:10:09.740653844 +0000 UTC m=+911.260795463" Nov 26 07:10:09 crc kubenswrapper[4940]: I1126 07:10:09.754613 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" podStartSLOduration=3.085188603 podStartE2EDuration="10.754592603s" podCreationTimestamp="2025-11-26 07:09:59 +0000 UTC" firstStartedPulling="2025-11-26 07:10:00.87844131 +0000 UTC m=+902.398582929" lastFinishedPulling="2025-11-26 07:10:08.54784531 +0000 UTC m=+910.067986929" observedRunningTime="2025-11-26 07:10:09.751146205 +0000 UTC 
m=+911.271287834" watchObservedRunningTime="2025-11-26 07:10:09.754592603 +0000 UTC m=+911.274734232" Nov 26 07:10:16 crc kubenswrapper[4940]: I1126 07:10:16.476391 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-7tbl5" Nov 26 07:10:16 crc kubenswrapper[4940]: I1126 07:10:16.901346 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6tzhl"] Nov 26 07:10:16 crc kubenswrapper[4940]: I1126 07:10:16.902406 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-6tzhl" Nov 26 07:10:16 crc kubenswrapper[4940]: I1126 07:10:16.910112 4940 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-hzlmb" Nov 26 07:10:16 crc kubenswrapper[4940]: I1126 07:10:16.912266 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6tzhl"] Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.021321 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zgjs\" (UniqueName: \"kubernetes.io/projected/6a36e7f5-47fa-4fcf-ad99-5f538e358254-kube-api-access-5zgjs\") pod \"cert-manager-86cb77c54b-6tzhl\" (UID: \"6a36e7f5-47fa-4fcf-ad99-5f538e358254\") " pod="cert-manager/cert-manager-86cb77c54b-6tzhl" Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.021454 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a36e7f5-47fa-4fcf-ad99-5f538e358254-bound-sa-token\") pod \"cert-manager-86cb77c54b-6tzhl\" (UID: \"6a36e7f5-47fa-4fcf-ad99-5f538e358254\") " pod="cert-manager/cert-manager-86cb77c54b-6tzhl" Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.122765 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a36e7f5-47fa-4fcf-ad99-5f538e358254-bound-sa-token\") pod \"cert-manager-86cb77c54b-6tzhl\" (UID: \"6a36e7f5-47fa-4fcf-ad99-5f538e358254\") " pod="cert-manager/cert-manager-86cb77c54b-6tzhl" Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.123191 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zgjs\" (UniqueName: \"kubernetes.io/projected/6a36e7f5-47fa-4fcf-ad99-5f538e358254-kube-api-access-5zgjs\") pod \"cert-manager-86cb77c54b-6tzhl\" (UID: \"6a36e7f5-47fa-4fcf-ad99-5f538e358254\") " pod="cert-manager/cert-manager-86cb77c54b-6tzhl" Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.146737 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a36e7f5-47fa-4fcf-ad99-5f538e358254-bound-sa-token\") pod \"cert-manager-86cb77c54b-6tzhl\" (UID: \"6a36e7f5-47fa-4fcf-ad99-5f538e358254\") " pod="cert-manager/cert-manager-86cb77c54b-6tzhl" Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.147022 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zgjs\" (UniqueName: \"kubernetes.io/projected/6a36e7f5-47fa-4fcf-ad99-5f538e358254-kube-api-access-5zgjs\") pod \"cert-manager-86cb77c54b-6tzhl\" (UID: \"6a36e7f5-47fa-4fcf-ad99-5f538e358254\") " pod="cert-manager/cert-manager-86cb77c54b-6tzhl" Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.221420 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-6tzhl" Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.620989 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6tzhl"] Nov 26 07:10:17 crc kubenswrapper[4940]: I1126 07:10:17.770589 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6tzhl" event={"ID":"6a36e7f5-47fa-4fcf-ad99-5f538e358254","Type":"ContainerStarted","Data":"fca1124dd7cb1a2a5ab4c8ea49d962c76ab51dc9176083176c2542b6153a7eec"} Nov 26 07:10:18 crc kubenswrapper[4940]: I1126 07:10:18.778883 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6tzhl" event={"ID":"6a36e7f5-47fa-4fcf-ad99-5f538e358254","Type":"ContainerStarted","Data":"f368a146c400a442dd34a1c2cd892a3e1699b9248e6a14e32c2fc10eb35d5a07"} Nov 26 07:10:18 crc kubenswrapper[4940]: I1126 07:10:18.806552 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-6tzhl" podStartSLOduration=2.80652937 podStartE2EDuration="2.80652937s" podCreationTimestamp="2025-11-26 07:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:10:18.797424834 +0000 UTC m=+920.317566453" watchObservedRunningTime="2025-11-26 07:10:18.80652937 +0000 UTC m=+920.326670989" Nov 26 07:10:21 crc kubenswrapper[4940]: I1126 07:10:21.728628 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:10:21 crc kubenswrapper[4940]: I1126 07:10:21.728943 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.437554 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-snzrm"] Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.438747 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-snzrm" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.442743 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-jmwzs" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.442845 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.444025 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.451697 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-snzrm"] Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.490959 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfx2s\" (UniqueName: \"kubernetes.io/projected/72c3713d-dd39-47f8-83b8-e2e28216fe2c-kube-api-access-sfx2s\") pod \"openstack-operator-index-snzrm\" (UID: \"72c3713d-dd39-47f8-83b8-e2e28216fe2c\") " pod="openstack-operators/openstack-operator-index-snzrm" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.592291 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfx2s\" (UniqueName: \"kubernetes.io/projected/72c3713d-dd39-47f8-83b8-e2e28216fe2c-kube-api-access-sfx2s\") pod \"openstack-operator-index-snzrm\" (UID: \"72c3713d-dd39-47f8-83b8-e2e28216fe2c\") " pod="openstack-operators/openstack-operator-index-snzrm" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.612843 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfx2s\" (UniqueName: \"kubernetes.io/projected/72c3713d-dd39-47f8-83b8-e2e28216fe2c-kube-api-access-sfx2s\") pod \"openstack-operator-index-snzrm\" (UID: \"72c3713d-dd39-47f8-83b8-e2e28216fe2c\") " pod="openstack-operators/openstack-operator-index-snzrm" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.758456 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-snzrm" Nov 26 07:10:29 crc kubenswrapper[4940]: I1126 07:10:29.944601 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-snzrm"] Nov 26 07:10:30 crc kubenswrapper[4940]: I1126 07:10:30.866091 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-snzrm" event={"ID":"72c3713d-dd39-47f8-83b8-e2e28216fe2c","Type":"ContainerStarted","Data":"dd64dc23e31cd238d6226619e39a349e77a6d34606b9a093e8168f2a9d2ad429"} Nov 26 07:10:31 crc kubenswrapper[4940]: I1126 07:10:31.875668 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-snzrm" event={"ID":"72c3713d-dd39-47f8-83b8-e2e28216fe2c","Type":"ContainerStarted","Data":"4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20"} Nov 26 07:10:31 crc kubenswrapper[4940]: I1126 07:10:31.893113 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-snzrm" podStartSLOduration=2.09217165 podStartE2EDuration="2.89308604s" podCreationTimestamp="2025-11-26 07:10:29 +0000 UTC" firstStartedPulling="2025-11-26 07:10:29.954365967 +0000 UTC m=+931.474507586" lastFinishedPulling="2025-11-26 07:10:30.755280347 +0000 UTC m=+932.275421976" observedRunningTime="2025-11-26 07:10:31.892139971 +0000 UTC m=+933.412281640" watchObservedRunningTime="2025-11-26 07:10:31.89308604 +0000 UTC m=+933.413227709" Nov 26 07:10:33 crc kubenswrapper[4940]: I1126 07:10:33.621666 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-snzrm"] Nov 26 07:10:33 crc kubenswrapper[4940]: I1126 07:10:33.888467 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-snzrm" podUID="72c3713d-dd39-47f8-83b8-e2e28216fe2c" containerName="registry-server" containerID="cri-o://4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20" gracePeriod=2 Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.265791 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-snzrm" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.356469 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfx2s\" (UniqueName: \"kubernetes.io/projected/72c3713d-dd39-47f8-83b8-e2e28216fe2c-kube-api-access-sfx2s\") pod \"72c3713d-dd39-47f8-83b8-e2e28216fe2c\" (UID: \"72c3713d-dd39-47f8-83b8-e2e28216fe2c\") " Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.362790 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72c3713d-dd39-47f8-83b8-e2e28216fe2c-kube-api-access-sfx2s" (OuterVolumeSpecName: "kube-api-access-sfx2s") pod "72c3713d-dd39-47f8-83b8-e2e28216fe2c" (UID: "72c3713d-dd39-47f8-83b8-e2e28216fe2c"). InnerVolumeSpecName "kube-api-access-sfx2s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.415449 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-6j69k"] Nov 26 07:10:34 crc kubenswrapper[4940]: E1126 07:10:34.415735 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72c3713d-dd39-47f8-83b8-e2e28216fe2c" containerName="registry-server" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.415749 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="72c3713d-dd39-47f8-83b8-e2e28216fe2c" containerName="registry-server" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.415892 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="72c3713d-dd39-47f8-83b8-e2e28216fe2c" containerName="registry-server" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.416299 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.430773 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6j69k"] Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.459099 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmld9\" (UniqueName: \"kubernetes.io/projected/41fd5ecb-5c59-4d84-ae08-d7090bb05b3e-kube-api-access-gmld9\") pod \"openstack-operator-index-6j69k\" (UID: \"41fd5ecb-5c59-4d84-ae08-d7090bb05b3e\") " pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.459244 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfx2s\" (UniqueName: \"kubernetes.io/projected/72c3713d-dd39-47f8-83b8-e2e28216fe2c-kube-api-access-sfx2s\") on node \"crc\" DevicePath \"\"" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.560232 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmld9\" (UniqueName: \"kubernetes.io/projected/41fd5ecb-5c59-4d84-ae08-d7090bb05b3e-kube-api-access-gmld9\") pod \"openstack-operator-index-6j69k\" (UID: \"41fd5ecb-5c59-4d84-ae08-d7090bb05b3e\") " pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.575252 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmld9\" (UniqueName: \"kubernetes.io/projected/41fd5ecb-5c59-4d84-ae08-d7090bb05b3e-kube-api-access-gmld9\") pod \"openstack-operator-index-6j69k\" (UID: \"41fd5ecb-5c59-4d84-ae08-d7090bb05b3e\") " pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.737659 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.906456 4940 generic.go:334] "Generic (PLEG): container finished" podID="72c3713d-dd39-47f8-83b8-e2e28216fe2c" containerID="4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20" exitCode=0 Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.906653 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-snzrm" event={"ID":"72c3713d-dd39-47f8-83b8-e2e28216fe2c","Type":"ContainerDied","Data":"4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20"} Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.906678 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-snzrm" event={"ID":"72c3713d-dd39-47f8-83b8-e2e28216fe2c","Type":"ContainerDied","Data":"dd64dc23e31cd238d6226619e39a349e77a6d34606b9a093e8168f2a9d2ad429"} Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.906695 4940 scope.go:117] "RemoveContainer" containerID="4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.906790 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-snzrm" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.910773 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6j69k"] Nov 26 07:10:34 crc kubenswrapper[4940]: W1126 07:10:34.923226 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41fd5ecb_5c59_4d84_ae08_d7090bb05b3e.slice/crio-b423518fe877ba3eeeff8be268cadeec9f834e28e956fc204006cf78a4953280 WatchSource:0}: Error finding container b423518fe877ba3eeeff8be268cadeec9f834e28e956fc204006cf78a4953280: Status 404 returned error can't find the container with id b423518fe877ba3eeeff8be268cadeec9f834e28e956fc204006cf78a4953280 Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.932213 4940 scope.go:117] "RemoveContainer" containerID="4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20" Nov 26 07:10:34 crc kubenswrapper[4940]: E1126 07:10:34.932820 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20\": container with ID starting with 4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20 not found: ID does not exist" containerID="4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.932846 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20"} err="failed to get container status \"4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20\": rpc error: code = NotFound desc = could not find container \"4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20\": container with ID starting with 4a2cfd5ff25293acb301147382344175dfc16dca00d9cc4b248da748e7bbeb20 not found: ID does not exist" Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 07:10:34.932882 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-snzrm"] Nov 26 07:10:34 crc kubenswrapper[4940]: I1126 
07:10:34.935660 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-snzrm"] Nov 26 07:10:35 crc kubenswrapper[4940]: I1126 07:10:35.178593 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72c3713d-dd39-47f8-83b8-e2e28216fe2c" path="/var/lib/kubelet/pods/72c3713d-dd39-47f8-83b8-e2e28216fe2c/volumes" Nov 26 07:10:35 crc kubenswrapper[4940]: I1126 07:10:35.913622 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6j69k" event={"ID":"41fd5ecb-5c59-4d84-ae08-d7090bb05b3e","Type":"ContainerStarted","Data":"b8ec2c672456a252eb9548e0dd682115f717f4dbcf504b08b37830efb2d72fdd"} Nov 26 07:10:35 crc kubenswrapper[4940]: I1126 07:10:35.913946 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6j69k" event={"ID":"41fd5ecb-5c59-4d84-ae08-d7090bb05b3e","Type":"ContainerStarted","Data":"b423518fe877ba3eeeff8be268cadeec9f834e28e956fc204006cf78a4953280"} Nov 26 07:10:35 crc kubenswrapper[4940]: I1126 07:10:35.929164 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-6j69k" podStartSLOduration=1.5400367799999999 podStartE2EDuration="1.929144118s" podCreationTimestamp="2025-11-26 07:10:34 +0000 UTC" firstStartedPulling="2025-11-26 07:10:34.932335323 +0000 UTC m=+936.452476942" lastFinishedPulling="2025-11-26 07:10:35.321442661 +0000 UTC m=+936.841584280" observedRunningTime="2025-11-26 07:10:35.925922717 +0000 UTC m=+937.446064356" watchObservedRunningTime="2025-11-26 07:10:35.929144118 +0000 UTC m=+937.449285737" Nov 26 07:10:44 crc kubenswrapper[4940]: I1126 07:10:44.738304 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:44 crc kubenswrapper[4940]: I1126 07:10:44.738776 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:44 crc kubenswrapper[4940]: I1126 07:10:44.764298 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:44 crc kubenswrapper[4940]: I1126 07:10:44.989883 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-6j69k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.267455 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k"] Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.271467 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.274798 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-5plmf" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.276775 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k"] Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.342813 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.342969 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.343014 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l8cb\" (UniqueName: \"kubernetes.io/projected/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-kube-api-access-6l8cb\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.444463 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.444736 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l8cb\" (UniqueName: \"kubernetes.io/projected/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-kube-api-access-6l8cb\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.444842 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.444946 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.446508 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.475601 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l8cb\" (UniqueName: \"kubernetes.io/projected/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-kube-api-access-6l8cb\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.589400 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.826255 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k"] Nov 26 07:10:48 crc kubenswrapper[4940]: W1126 07:10:48.830582 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcf304eb_07f5_4db8_b8fa_a7b71a11b1be.slice/crio-abde376d5c461fd9f8c95c07982e1e2cd00d15a0f220a7e0de0dd0a9458f1cc7 WatchSource:0}: Error finding container abde376d5c461fd9f8c95c07982e1e2cd00d15a0f220a7e0de0dd0a9458f1cc7: Status 404 returned error can't find the container with id abde376d5c461fd9f8c95c07982e1e2cd00d15a0f220a7e0de0dd0a9458f1cc7 Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.990290 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" event={"ID":"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be","Type":"ContainerStarted","Data":"4e1135f311c53505a9bc6bf40983feeea8fa54d3a409ee8f529eaa5f1be7f9eb"} Nov 26 07:10:48 crc kubenswrapper[4940]: I1126 07:10:48.990344 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" event={"ID":"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be","Type":"ContainerStarted","Data":"abde376d5c461fd9f8c95c07982e1e2cd00d15a0f220a7e0de0dd0a9458f1cc7"} Nov 26 07:10:49 crc kubenswrapper[4940]: I1126 07:10:49.997497 4940 generic.go:334] "Generic (PLEG): container finished" podID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerID="4e1135f311c53505a9bc6bf40983feeea8fa54d3a409ee8f529eaa5f1be7f9eb" exitCode=0 Nov 26 07:10:50 crc kubenswrapper[4940]: I1126 07:10:49.997557 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" event={"ID":"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be","Type":"ContainerDied","Data":"4e1135f311c53505a9bc6bf40983feeea8fa54d3a409ee8f529eaa5f1be7f9eb"} Nov 26 07:10:51 crc kubenswrapper[4940]: 
I1126 07:10:51.008197 4940 generic.go:334] "Generic (PLEG): container finished" podID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerID="65d8c3849733b1b5df247ca0adc86e053c49955540856705a28c89ae035a39c0" exitCode=0 Nov 26 07:10:51 crc kubenswrapper[4940]: I1126 07:10:51.008249 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" event={"ID":"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be","Type":"ContainerDied","Data":"65d8c3849733b1b5df247ca0adc86e053c49955540856705a28c89ae035a39c0"} Nov 26 07:10:51 crc kubenswrapper[4940]: I1126 07:10:51.729132 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:10:51 crc kubenswrapper[4940]: I1126 07:10:51.729204 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:10:52 crc kubenswrapper[4940]: I1126 07:10:52.019815 4940 generic.go:334] "Generic (PLEG): container finished" podID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerID="b351fd7d30db2976d0e23377acae11cde56f7b24447b873890a1722b8d166b8f" exitCode=0 Nov 26 07:10:52 crc kubenswrapper[4940]: I1126 07:10:52.019884 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" event={"ID":"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be","Type":"ContainerDied","Data":"b351fd7d30db2976d0e23377acae11cde56f7b24447b873890a1722b8d166b8f"} Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.270416 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.322504 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6l8cb\" (UniqueName: \"kubernetes.io/projected/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-kube-api-access-6l8cb\") pod \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.322613 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-bundle\") pod \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.322697 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-util\") pod \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\" (UID: \"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be\") " Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.323349 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-bundle" (OuterVolumeSpecName: "bundle") pod "fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" (UID: "fcf304eb-07f5-4db8-b8fa-a7b71a11b1be"). 
InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.324669 4940 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.331623 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-kube-api-access-6l8cb" (OuterVolumeSpecName: "kube-api-access-6l8cb") pod "fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" (UID: "fcf304eb-07f5-4db8-b8fa-a7b71a11b1be"). InnerVolumeSpecName "kube-api-access-6l8cb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.344100 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-util" (OuterVolumeSpecName: "util") pod "fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" (UID: "fcf304eb-07f5-4db8-b8fa-a7b71a11b1be"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.426573 4940 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-util\") on node \"crc\" DevicePath \"\"" Nov 26 07:10:53 crc kubenswrapper[4940]: I1126 07:10:53.426624 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6l8cb\" (UniqueName: \"kubernetes.io/projected/fcf304eb-07f5-4db8-b8fa-a7b71a11b1be-kube-api-access-6l8cb\") on node \"crc\" DevicePath \"\"" Nov 26 07:10:54 crc kubenswrapper[4940]: I1126 07:10:54.032458 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" event={"ID":"fcf304eb-07f5-4db8-b8fa-a7b71a11b1be","Type":"ContainerDied","Data":"abde376d5c461fd9f8c95c07982e1e2cd00d15a0f220a7e0de0dd0a9458f1cc7"} Nov 26 07:10:54 crc kubenswrapper[4940]: I1126 07:10:54.032507 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abde376d5c461fd9f8c95c07982e1e2cd00d15a0f220a7e0de0dd0a9458f1cc7" Nov 26 07:10:54 crc kubenswrapper[4940]: I1126 07:10:54.032509 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.093444 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9"] Nov 26 07:10:57 crc kubenswrapper[4940]: E1126 07:10:57.093904 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerName="util" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.093915 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerName="util" Nov 26 07:10:57 crc kubenswrapper[4940]: E1126 07:10:57.093928 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerName="extract" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.093933 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerName="extract" Nov 26 07:10:57 crc kubenswrapper[4940]: E1126 07:10:57.093951 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerName="pull" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.093958 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerName="pull" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.094074 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcf304eb-07f5-4db8-b8fa-a7b71a11b1be" containerName="extract" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.094703 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.097189 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-28dz5" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.123502 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9"] Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.185467 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7chp\" (UniqueName: \"kubernetes.io/projected/192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea-kube-api-access-l7chp\") pod \"openstack-operator-controller-operator-5675dd9766-49nb9\" (UID: \"192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.287024 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7chp\" (UniqueName: \"kubernetes.io/projected/192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea-kube-api-access-l7chp\") pod \"openstack-operator-controller-operator-5675dd9766-49nb9\" (UID: \"192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.310725 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7chp\" (UniqueName: \"kubernetes.io/projected/192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea-kube-api-access-l7chp\") pod \"openstack-operator-controller-operator-5675dd9766-49nb9\" 
(UID: \"192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.412598 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:10:57 crc kubenswrapper[4940]: I1126 07:10:57.868034 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9"] Nov 26 07:10:58 crc kubenswrapper[4940]: I1126 07:10:58.056027 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" event={"ID":"192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea","Type":"ContainerStarted","Data":"5ac5921b32416cfe9d172bc9d6b2b779819f24b96327520a2416c6d43fd075d2"} Nov 26 07:11:03 crc kubenswrapper[4940]: I1126 07:11:03.089976 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" event={"ID":"192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea","Type":"ContainerStarted","Data":"df807c7ebe9dbedf3d4860022ba1efc7abaeb3f07501f30542d513f336d18784"} Nov 26 07:11:03 crc kubenswrapper[4940]: I1126 07:11:03.092113 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:11:03 crc kubenswrapper[4940]: I1126 07:11:03.124476 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" podStartSLOduration=2.015868681 podStartE2EDuration="6.124459932s" podCreationTimestamp="2025-11-26 07:10:57 +0000 UTC" firstStartedPulling="2025-11-26 07:10:57.872266465 +0000 UTC m=+959.392408084" lastFinishedPulling="2025-11-26 07:11:01.980857716 +0000 UTC m=+963.500999335" observedRunningTime="2025-11-26 07:11:03.121898791 +0000 UTC m=+964.642040420" watchObservedRunningTime="2025-11-26 07:11:03.124459932 +0000 UTC m=+964.644601561" Nov 26 07:11:07 crc kubenswrapper[4940]: I1126 07:11:07.416486 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:11:21 crc kubenswrapper[4940]: I1126 07:11:21.728549 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:11:21 crc kubenswrapper[4940]: I1126 07:11:21.728979 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:11:21 crc kubenswrapper[4940]: I1126 07:11:21.729025 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:11:21 crc kubenswrapper[4940]: I1126 07:11:21.729597 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"a590ce9820b409bfd278396384f5037a9a72b69b72e14ce99d91eca689514af5"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:11:21 crc kubenswrapper[4940]: I1126 07:11:21.729648 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://a590ce9820b409bfd278396384f5037a9a72b69b72e14ce99d91eca689514af5" gracePeriod=600 Nov 26 07:11:22 crc kubenswrapper[4940]: I1126 07:11:22.198682 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="a590ce9820b409bfd278396384f5037a9a72b69b72e14ce99d91eca689514af5" exitCode=0 Nov 26 07:11:22 crc kubenswrapper[4940]: I1126 07:11:22.199011 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"a590ce9820b409bfd278396384f5037a9a72b69b72e14ce99d91eca689514af5"} Nov 26 07:11:22 crc kubenswrapper[4940]: I1126 07:11:22.199041 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"0393c811a7e2a741f1c1d69f74428b6878add21bd4ed185abcf620da55c2d4a4"} Nov 26 07:11:22 crc kubenswrapper[4940]: I1126 07:11:22.199160 4940 scope.go:117] "RemoveContainer" containerID="4831b08e520666c63a46d4332bc996f659d9f6b347ced8e4ef4908839bbfa56c" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.570777 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.573367 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.575632 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-gmxv9" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.578646 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.579891 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.582426 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-nw2st" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.589971 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-6qpkm"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.590875 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.596333 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-9kxgd" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.599669 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.617962 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.620884 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-6qpkm"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.624244 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.625261 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.627259 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-5552r" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.640675 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.641877 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.647992 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-d2d59" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.648034 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.650108 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.655601 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.679502 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.679778 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.692766 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-fd8hz" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.702530 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67f9t\" (UniqueName: \"kubernetes.io/projected/86296495-65bc-46a3-a775-621a7bf1745f-kube-api-access-67f9t\") pod \"barbican-operator-controller-manager-7b64f4fb85-hxbtm\" (UID: \"86296495-65bc-46a3-a775-621a7bf1745f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.702590 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxdw8\" (UniqueName: \"kubernetes.io/projected/7ce6057b-0d67-48fc-9d34-b6574eda6978-kube-api-access-qxdw8\") pod \"cinder-operator-controller-manager-6b7f75547b-d75td\" (UID: \"7ce6057b-0d67-48fc-9d34-b6574eda6978\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.702616 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7stqh\" (UniqueName: \"kubernetes.io/projected/9527c833-8bce-440b-b4e5-ca0a08ef7d28-kube-api-access-7stqh\") pod \"designate-operator-controller-manager-955677c94-6qpkm\" (UID: \"9527c833-8bce-440b-b4e5-ca0a08ef7d28\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.705950 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-txjbc"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.706868 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.710064 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.710419 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-7rfvc" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.751561 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.753026 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.754665 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-wtdks" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.760101 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-txjbc"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.812892 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.817962 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96b2c\" (UniqueName: \"kubernetes.io/projected/51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5-kube-api-access-96b2c\") pod \"heat-operator-controller-manager-5b77f656f-s2cvl\" (UID: \"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.818219 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67f9t\" (UniqueName: \"kubernetes.io/projected/86296495-65bc-46a3-a775-621a7bf1745f-kube-api-access-67f9t\") pod \"barbican-operator-controller-manager-7b64f4fb85-hxbtm\" (UID: \"86296495-65bc-46a3-a775-621a7bf1745f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.818263 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4brkk\" (UniqueName: \"kubernetes.io/projected/327827f9-8ca3-4d2e-8478-ace9eb784b21-kube-api-access-4brkk\") pod \"glance-operator-controller-manager-589cbd6b5b-55vwf\" (UID: \"327827f9-8ca3-4d2e-8478-ace9eb784b21\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.818355 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxdw8\" (UniqueName: \"kubernetes.io/projected/7ce6057b-0d67-48fc-9d34-b6574eda6978-kube-api-access-qxdw8\") pod \"cinder-operator-controller-manager-6b7f75547b-d75td\" (UID: \"7ce6057b-0d67-48fc-9d34-b6574eda6978\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.818398 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7stqh\" (UniqueName: \"kubernetes.io/projected/9527c833-8bce-440b-b4e5-ca0a08ef7d28-kube-api-access-7stqh\") pod \"designate-operator-controller-manager-955677c94-6qpkm\" (UID: \"9527c833-8bce-440b-b4e5-ca0a08ef7d28\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.827282 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hl7l\" (UniqueName: \"kubernetes.io/projected/b53f82af-849a-47b4-a878-676055ad11ef-kube-api-access-5hl7l\") pod \"horizon-operator-controller-manager-5d494799bf-97dsx\" (UID: \"b53f82af-849a-47b4-a878-676055ad11ef\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:11:24 crc 
kubenswrapper[4940]: I1126 07:11:24.827451 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgnjl\" (UniqueName: \"kubernetes.io/projected/b805b33b-94ee-4037-907b-339573471ddb-kube-api-access-cgnjl\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.853678 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.854526 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.856604 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-f7795" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.858520 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.872239 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxdw8\" (UniqueName: \"kubernetes.io/projected/7ce6057b-0d67-48fc-9d34-b6574eda6978-kube-api-access-qxdw8\") pod \"cinder-operator-controller-manager-6b7f75547b-d75td\" (UID: \"7ce6057b-0d67-48fc-9d34-b6574eda6978\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.891725 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.893019 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.895743 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67f9t\" (UniqueName: \"kubernetes.io/projected/86296495-65bc-46a3-a775-621a7bf1745f-kube-api-access-67f9t\") pod \"barbican-operator-controller-manager-7b64f4fb85-hxbtm\" (UID: \"86296495-65bc-46a3-a775-621a7bf1745f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.896059 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-xxxkf" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.903131 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.914892 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7stqh\" (UniqueName: \"kubernetes.io/projected/9527c833-8bce-440b-b4e5-ca0a08ef7d28-kube-api-access-7stqh\") pod \"designate-operator-controller-manager-955677c94-6qpkm\" (UID: \"9527c833-8bce-440b-b4e5-ca0a08ef7d28\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.919182 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.926672 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.955032 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96b2c\" (UniqueName: \"kubernetes.io/projected/51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5-kube-api-access-96b2c\") pod \"heat-operator-controller-manager-5b77f656f-s2cvl\" (UID: \"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.955117 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4brkk\" (UniqueName: \"kubernetes.io/projected/327827f9-8ca3-4d2e-8478-ace9eb784b21-kube-api-access-4brkk\") pod \"glance-operator-controller-manager-589cbd6b5b-55vwf\" (UID: \"327827f9-8ca3-4d2e-8478-ace9eb784b21\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.955157 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69l59\" (UniqueName: \"kubernetes.io/projected/0eca9bcc-5909-48e2-927e-b059359977d5-kube-api-access-69l59\") pod \"ironic-operator-controller-manager-67cb4dc6d4-ht6zr\" (UID: \"0eca9bcc-5909-48e2-927e-b059359977d5\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.955191 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hl7l\" (UniqueName: \"kubernetes.io/projected/b53f82af-849a-47b4-a878-676055ad11ef-kube-api-access-5hl7l\") pod \"horizon-operator-controller-manager-5d494799bf-97dsx\" (UID: \"b53f82af-849a-47b4-a878-676055ad11ef\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.955258 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgnjl\" (UniqueName: \"kubernetes.io/projected/b805b33b-94ee-4037-907b-339573471ddb-kube-api-access-cgnjl\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.955284 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" 
(UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:24 crc kubenswrapper[4940]: E1126 07:11:24.955427 4940 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 07:11:24 crc kubenswrapper[4940]: E1126 07:11:24.955488 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert podName:b805b33b-94ee-4037-907b-339573471ddb nodeName:}" failed. No retries permitted until 2025-11-26 07:11:25.455467806 +0000 UTC m=+986.975609425 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert") pod "infra-operator-controller-manager-57548d458d-txjbc" (UID: "b805b33b-94ee-4037-907b-339573471ddb") : secret "infra-operator-webhook-server-cert" not found Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.971116 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.972485 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.975246 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn"] Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.976649 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hl7l\" (UniqueName: \"kubernetes.io/projected/b53f82af-849a-47b4-a878-676055ad11ef-kube-api-access-5hl7l\") pod \"horizon-operator-controller-manager-5d494799bf-97dsx\" (UID: \"b53f82af-849a-47b4-a878-676055ad11ef\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.977160 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-6r6h6" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.982174 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgnjl\" (UniqueName: \"kubernetes.io/projected/b805b33b-94ee-4037-907b-339573471ddb-kube-api-access-cgnjl\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.983715 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4brkk\" (UniqueName: \"kubernetes.io/projected/327827f9-8ca3-4d2e-8478-ace9eb784b21-kube-api-access-4brkk\") pod \"glance-operator-controller-manager-589cbd6b5b-55vwf\" (UID: \"327827f9-8ca3-4d2e-8478-ace9eb784b21\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:11:24 crc kubenswrapper[4940]: I1126 07:11:24.995916 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96b2c\" (UniqueName: \"kubernetes.io/projected/51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5-kube-api-access-96b2c\") pod \"heat-operator-controller-manager-5b77f656f-s2cvl\" (UID: \"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5\") " 
pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.002794 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.009464 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.016029 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.019259 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-fxhxc" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.028108 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.029353 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.034367 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-9prwq" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.034446 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.039565 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.048600 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.049893 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.054052 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.054128 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-xb29d" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.055093 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.057094 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-m2568" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.057781 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.065084 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.065507 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5842\" (UniqueName: \"kubernetes.io/projected/e46c7b1d-e02f-4807-a650-1038eba64162-kube-api-access-d5842\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-qwlpn\" (UID: \"e46c7b1d-e02f-4807-a650-1038eba64162\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.065563 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmllf\" (UniqueName: \"kubernetes.io/projected/e4255a56-ed59-4cad-90a4-91abb39144d4-kube-api-access-lmllf\") pod \"keystone-operator-controller-manager-7b4567c7cf-xbbg2\" (UID: \"e4255a56-ed59-4cad-90a4-91abb39144d4\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.066642 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69l59\" (UniqueName: \"kubernetes.io/projected/0eca9bcc-5909-48e2-927e-b059359977d5-kube-api-access-69l59\") pod \"ironic-operator-controller-manager-67cb4dc6d4-ht6zr\" (UID: \"0eca9bcc-5909-48e2-927e-b059359977d5\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.066669 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-948fs\" (UniqueName: \"kubernetes.io/projected/5c068c7e-f13c-45ca-b161-e590eefdd568-kube-api-access-948fs\") pod \"nova-operator-controller-manager-79556f57fc-lzctw\" (UID: \"5c068c7e-f13c-45ca-b161-e590eefdd568\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.066690 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx44x\" (UniqueName: \"kubernetes.io/projected/15d11cf9-51e8-4f1e-880e-86d9bba60224-kube-api-access-mx44x\") pod \"octavia-operator-controller-manager-64cdc6ff96-5p8hf\" (UID: \"15d11cf9-51e8-4f1e-880e-86d9bba60224\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.066726 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbrtm\" (UniqueName: \"kubernetes.io/projected/e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4-kube-api-access-pbrtm\") pod \"ovn-operator-controller-manager-56897c768d-jxxwj\" (UID: \"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 
07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.066960 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wh4q\" (UniqueName: \"kubernetes.io/projected/2b2e7f46-8ad4-4361-8e95-76aa1e091665-kube-api-access-6wh4q\") pod \"neutron-operator-controller-manager-6fdcddb789-x9ffm\" (UID: \"2b2e7f46-8ad4-4361-8e95-76aa1e091665\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.067115 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fvxf\" (UniqueName: \"kubernetes.io/projected/68cd61b8-efae-4aef-bd7a-3e90201b5809-kube-api-access-6fvxf\") pod \"manila-operator-controller-manager-5d499bf58b-qsrc5\" (UID: \"68cd61b8-efae-4aef-bd7a-3e90201b5809\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.072943 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.077980 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.083521 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.083669 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-58mnz" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.090502 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.101555 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69l59\" (UniqueName: \"kubernetes.io/projected/0eca9bcc-5909-48e2-927e-b059359977d5-kube-api-access-69l59\") pod \"ironic-operator-controller-manager-67cb4dc6d4-ht6zr\" (UID: \"0eca9bcc-5909-48e2-927e-b059359977d5\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.101637 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.103068 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.107285 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-gmc8n" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.107316 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.112159 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.115592 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.119213 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-qm25m" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.129573 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.133751 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.135993 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.139497 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.140919 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-mjg56" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.164334 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.165592 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.167345 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-9wqr8" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168449 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxwcl\" (UniqueName: \"kubernetes.io/projected/5ac0ef91-42dc-4bed-b5bc-4c668b3249cc-kube-api-access-xxwcl\") pod \"swift-operator-controller-manager-d77b94747-nwpjs\" (UID: \"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168495 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88c2j\" (UniqueName: \"kubernetes.io/projected/7fcb4d96-f7a7-4ead-a820-db2eb2785a87-kube-api-access-88c2j\") pod \"telemetry-operator-controller-manager-76cc84c6bb-jgp88\" (UID: \"7fcb4d96-f7a7-4ead-a820-db2eb2785a87\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168529 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5842\" (UniqueName: \"kubernetes.io/projected/e46c7b1d-e02f-4807-a650-1038eba64162-kube-api-access-d5842\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-qwlpn\" (UID: \"e46c7b1d-e02f-4807-a650-1038eba64162\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168559 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjbm5\" (UniqueName: \"kubernetes.io/projected/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-kube-api-access-kjbm5\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168587 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmllf\" (UniqueName: \"kubernetes.io/projected/e4255a56-ed59-4cad-90a4-91abb39144d4-kube-api-access-lmllf\") pod \"keystone-operator-controller-manager-7b4567c7cf-xbbg2\" (UID: \"e4255a56-ed59-4cad-90a4-91abb39144d4\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168625 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-948fs\" (UniqueName: \"kubernetes.io/projected/5c068c7e-f13c-45ca-b161-e590eefdd568-kube-api-access-948fs\") pod \"nova-operator-controller-manager-79556f57fc-lzctw\" (UID: \"5c068c7e-f13c-45ca-b161-e590eefdd568\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168648 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx44x\" (UniqueName: \"kubernetes.io/projected/15d11cf9-51e8-4f1e-880e-86d9bba60224-kube-api-access-mx44x\") pod \"octavia-operator-controller-manager-64cdc6ff96-5p8hf\" (UID: \"15d11cf9-51e8-4f1e-880e-86d9bba60224\") " 
pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168695 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbrtm\" (UniqueName: \"kubernetes.io/projected/e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4-kube-api-access-pbrtm\") pod \"ovn-operator-controller-manager-56897c768d-jxxwj\" (UID: \"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.168722 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wh4q\" (UniqueName: \"kubernetes.io/projected/2b2e7f46-8ad4-4361-8e95-76aa1e091665-kube-api-access-6wh4q\") pod \"neutron-operator-controller-manager-6fdcddb789-x9ffm\" (UID: \"2b2e7f46-8ad4-4361-8e95-76aa1e091665\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.186997 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfmh8\" (UniqueName: \"kubernetes.io/projected/7756325b-5cc4-4eb6-ae14-5f71924c3413-kube-api-access-dfmh8\") pod \"placement-operator-controller-manager-57988cc5b5-2ddr6\" (UID: \"7756325b-5cc4-4eb6-ae14-5f71924c3413\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.187081 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fvxf\" (UniqueName: \"kubernetes.io/projected/68cd61b8-efae-4aef-bd7a-3e90201b5809-kube-api-access-6fvxf\") pod \"manila-operator-controller-manager-5d499bf58b-qsrc5\" (UID: \"68cd61b8-efae-4aef-bd7a-3e90201b5809\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.187123 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njskc\" (UniqueName: \"kubernetes.io/projected/53456206-67c0-4503-b72f-909a3ec07b2a-kube-api-access-njskc\") pod \"test-operator-controller-manager-5cd6c7f4c8-4j248\" (UID: \"53456206-67c0-4503-b72f-909a3ec07b2a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.187147 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.194708 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.215478 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fvxf\" (UniqueName: \"kubernetes.io/projected/68cd61b8-efae-4aef-bd7a-3e90201b5809-kube-api-access-6fvxf\") pod \"manila-operator-controller-manager-5d499bf58b-qsrc5\" (UID: \"68cd61b8-efae-4aef-bd7a-3e90201b5809\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.215465 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wh4q\" (UniqueName: \"kubernetes.io/projected/2b2e7f46-8ad4-4361-8e95-76aa1e091665-kube-api-access-6wh4q\") pod \"neutron-operator-controller-manager-6fdcddb789-x9ffm\" (UID: \"2b2e7f46-8ad4-4361-8e95-76aa1e091665\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.216998 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.218192 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmllf\" (UniqueName: \"kubernetes.io/projected/e4255a56-ed59-4cad-90a4-91abb39144d4-kube-api-access-lmllf\") pod \"keystone-operator-controller-manager-7b4567c7cf-xbbg2\" (UID: \"e4255a56-ed59-4cad-90a4-91abb39144d4\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.220793 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5842\" (UniqueName: \"kubernetes.io/projected/e46c7b1d-e02f-4807-a650-1038eba64162-kube-api-access-d5842\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-qwlpn\" (UID: \"e46c7b1d-e02f-4807-a650-1038eba64162\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.222468 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-948fs\" (UniqueName: \"kubernetes.io/projected/5c068c7e-f13c-45ca-b161-e590eefdd568-kube-api-access-948fs\") pod \"nova-operator-controller-manager-79556f57fc-lzctw\" (UID: \"5c068c7e-f13c-45ca-b161-e590eefdd568\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.223621 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx44x\" (UniqueName: \"kubernetes.io/projected/15d11cf9-51e8-4f1e-880e-86d9bba60224-kube-api-access-mx44x\") pod \"octavia-operator-controller-manager-64cdc6ff96-5p8hf\" (UID: \"15d11cf9-51e8-4f1e-880e-86d9bba60224\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.230616 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbrtm\" (UniqueName: \"kubernetes.io/projected/e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4-kube-api-access-pbrtm\") pod \"ovn-operator-controller-manager-56897c768d-jxxwj\" (UID: \"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.243986 4940 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.260929 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.272898 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.288762 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfmh8\" (UniqueName: \"kubernetes.io/projected/7756325b-5cc4-4eb6-ae14-5f71924c3413-kube-api-access-dfmh8\") pod \"placement-operator-controller-manager-57988cc5b5-2ddr6\" (UID: \"7756325b-5cc4-4eb6-ae14-5f71924c3413\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.288818 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njskc\" (UniqueName: \"kubernetes.io/projected/53456206-67c0-4503-b72f-909a3ec07b2a-kube-api-access-njskc\") pod \"test-operator-controller-manager-5cd6c7f4c8-4j248\" (UID: \"53456206-67c0-4503-b72f-909a3ec07b2a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.288844 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.288873 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxwcl\" (UniqueName: \"kubernetes.io/projected/5ac0ef91-42dc-4bed-b5bc-4c668b3249cc-kube-api-access-xxwcl\") pod \"swift-operator-controller-manager-d77b94747-nwpjs\" (UID: \"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.288915 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88c2j\" (UniqueName: \"kubernetes.io/projected/7fcb4d96-f7a7-4ead-a820-db2eb2785a87-kube-api-access-88c2j\") pod \"telemetry-operator-controller-manager-76cc84c6bb-jgp88\" (UID: \"7fcb4d96-f7a7-4ead-a820-db2eb2785a87\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.288989 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjbm5\" (UniqueName: \"kubernetes.io/projected/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-kube-api-access-kjbm5\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.293484 4940 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret 
"openstack-baremetal-operator-webhook-server-cert" not found Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.293541 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert podName:869e95c8-91e8-4b5a-8eda-35c045ee8cbe nodeName:}" failed. No retries permitted until 2025-11-26 07:11:25.793524688 +0000 UTC m=+987.313666307 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" (UID: "869e95c8-91e8-4b5a-8eda-35c045ee8cbe") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.294075 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.319161 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.320365 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.320612 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.339027 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.339384 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.343944 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.344553 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-4l8m6" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.365674 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.367777 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjbm5\" (UniqueName: \"kubernetes.io/projected/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-kube-api-access-kjbm5\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.368832 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njskc\" (UniqueName: \"kubernetes.io/projected/53456206-67c0-4503-b72f-909a3ec07b2a-kube-api-access-njskc\") pod \"test-operator-controller-manager-5cd6c7f4c8-4j248\" (UID: \"53456206-67c0-4503-b72f-909a3ec07b2a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.369742 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfmh8\" (UniqueName: \"kubernetes.io/projected/7756325b-5cc4-4eb6-ae14-5f71924c3413-kube-api-access-dfmh8\") pod \"placement-operator-controller-manager-57988cc5b5-2ddr6\" (UID: \"7756325b-5cc4-4eb6-ae14-5f71924c3413\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.369861 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxwcl\" (UniqueName: \"kubernetes.io/projected/5ac0ef91-42dc-4bed-b5bc-4c668b3249cc-kube-api-access-xxwcl\") pod \"swift-operator-controller-manager-d77b94747-nwpjs\" (UID: \"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.371121 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88c2j\" (UniqueName: \"kubernetes.io/projected/7fcb4d96-f7a7-4ead-a820-db2eb2785a87-kube-api-access-88c2j\") pod \"telemetry-operator-controller-manager-76cc84c6bb-jgp88\" (UID: \"7fcb4d96-f7a7-4ead-a820-db2eb2785a87\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.376711 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.381942 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.393292 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4xsl\" (UniqueName: \"kubernetes.io/projected/67bde2c7-9e64-469e-b400-071b32f065da-kube-api-access-t4xsl\") pod \"watcher-operator-controller-manager-656dcb59d4-vcr8t\" (UID: \"67bde2c7-9e64-469e-b400-071b32f065da\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.399335 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.423862 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.440291 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.441142 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.442205 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.444628 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-zt67h" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.444849 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.445795 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.449452 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.467904 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.478897 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.479878 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.488990 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-mf8lc" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.494783 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.494886 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgd9m\" (UniqueName: \"kubernetes.io/projected/df395369-43ff-4cd2-af6e-60a9a96a4d66-kube-api-access-zgd9m\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.494963 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.494998 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qgv4\" (UniqueName: \"kubernetes.io/projected/e8c9bb46-a618-437a-914b-6cb9c1ede58c-kube-api-access-7qgv4\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zrxd8\" (UID: \"e8c9bb46-a618-437a-914b-6cb9c1ede58c\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.495082 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.495110 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4xsl\" (UniqueName: \"kubernetes.io/projected/67bde2c7-9e64-469e-b400-071b32f065da-kube-api-access-t4xsl\") pod \"watcher-operator-controller-manager-656dcb59d4-vcr8t\" (UID: \"67bde2c7-9e64-469e-b400-071b32f065da\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.495220 4940 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.495266 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert podName:b805b33b-94ee-4037-907b-339573471ddb nodeName:}" 
failed. No retries permitted until 2025-11-26 07:11:26.495251476 +0000 UTC m=+988.015393095 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert") pod "infra-operator-controller-manager-57548d458d-txjbc" (UID: "b805b33b-94ee-4037-907b-339573471ddb") : secret "infra-operator-webhook-server-cert" not found Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.503017 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.518747 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4xsl\" (UniqueName: \"kubernetes.io/projected/67bde2c7-9e64-469e-b400-071b32f065da-kube-api-access-t4xsl\") pod \"watcher-operator-controller-manager-656dcb59d4-vcr8t\" (UID: \"67bde2c7-9e64-469e-b400-071b32f065da\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.540290 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.588692 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td"] Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.608116 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.608273 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgd9m\" (UniqueName: \"kubernetes.io/projected/df395369-43ff-4cd2-af6e-60a9a96a4d66-kube-api-access-zgd9m\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.608338 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.608373 4940 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.608389 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qgv4\" (UniqueName: \"kubernetes.io/projected/e8c9bb46-a618-437a-914b-6cb9c1ede58c-kube-api-access-7qgv4\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zrxd8\" (UID: \"e8c9bb46-a618-437a-914b-6cb9c1ede58c\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.608450 4940 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:26.108424474 +0000 UTC m=+987.628566083 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "webhook-server-cert" not found Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.608601 4940 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.608626 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:26.108619681 +0000 UTC m=+987.628761300 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "metrics-server-cert" not found Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.625436 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgd9m\" (UniqueName: \"kubernetes.io/projected/df395369-43ff-4cd2-af6e-60a9a96a4d66-kube-api-access-zgd9m\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.626176 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qgv4\" (UniqueName: \"kubernetes.io/projected/e8c9bb46-a618-437a-914b-6cb9c1ede58c-kube-api-access-7qgv4\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zrxd8\" (UID: \"e8c9bb46-a618-437a-914b-6cb9c1ede58c\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.675347 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t"
Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.801967 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm"]
Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.812868 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"
Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.813065 4940 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:11:25 crc kubenswrapper[4940]: E1126 07:11:25.813143 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert podName:869e95c8-91e8-4b5a-8eda-35c045ee8cbe nodeName:}" failed. No retries permitted until 2025-11-26 07:11:26.813120506 +0000 UTC m=+988.333262125 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" (UID: "869e95c8-91e8-4b5a-8eda-35c045ee8cbe") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.821086 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx"]
Nov 26 07:11:25 crc kubenswrapper[4940]: I1126 07:11:25.825518 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8"
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.006075 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-6qpkm"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.022383 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.052305 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.126913 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.126986 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.127148 4940 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.127202 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:27.127183477 +0000 UTC m=+988.647325096 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "metrics-server-cert" not found
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.127257 4940 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.127340 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:27.127313352 +0000 UTC m=+988.647454971 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "webhook-server-cert" not found
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.143652 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2"]
Nov 26 07:11:26 crc kubenswrapper[4940]: W1126 07:11:26.241123 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4255a56_ed59_4cad_90a4_91abb39144d4.slice/crio-a7a8327bb581c13a735d993f90432f15b4f9a5882fc9d587b79fc5004677976a WatchSource:0}: Error finding container a7a8327bb581c13a735d993f90432f15b4f9a5882fc9d587b79fc5004677976a: Status 404 returned error can't find the container with id a7a8327bb581c13a735d993f90432f15b4f9a5882fc9d587b79fc5004677976a
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.254926 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" event={"ID":"86296495-65bc-46a3-a775-621a7bf1745f","Type":"ContainerStarted","Data":"8db8d1fb253f3937e70ec5315420cafe6985d5c475f12d08648b1c909683f15c"}
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.259078 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" event={"ID":"b53f82af-849a-47b4-a878-676055ad11ef","Type":"ContainerStarted","Data":"1876523024a6d0d6732095010e76c8ae52a6ce9407f28cbb72f6dfd687620954"}
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.265705 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" event={"ID":"9527c833-8bce-440b-b4e5-ca0a08ef7d28","Type":"ContainerStarted","Data":"a8e7b39efac519338d7af749bd04afe1c7717beee7587b4588064a61c58c8176"}
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.267414 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" event={"ID":"327827f9-8ca3-4d2e-8478-ace9eb784b21","Type":"ContainerStarted","Data":"33a4f3e21b441eed63964555583a79b48ebcc9acc8f5d8bada6b14d7a8b36bf7"}
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.273659 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" event={"ID":"e4255a56-ed59-4cad-90a4-91abb39144d4","Type":"ContainerStarted","Data":"a7a8327bb581c13a735d993f90432f15b4f9a5882fc9d587b79fc5004677976a"}
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.290896 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" event={"ID":"7ce6057b-0d67-48fc-9d34-b6574eda6978","Type":"ContainerStarted","Data":"cc8068754bdc7f1c79f024c267c3bf81b77380a0484526a17e9b3101e195fca3"}
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.292304 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" event={"ID":"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5","Type":"ContainerStarted","Data":"98786aab34db0c903603e544c6401aa9e97063e017e5de84557fa17449411411"}
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.529571 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.535147 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.535283 4940 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.535352 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert podName:b805b33b-94ee-4037-907b-339573471ddb nodeName:}" failed. No retries permitted until 2025-11-26 07:11:28.535334922 +0000 UTC m=+990.055476531 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert") pod "infra-operator-controller-manager-57548d458d-txjbc" (UID: "b805b33b-94ee-4037-907b-339573471ddb") : secret "infra-operator-webhook-server-cert" not found
Nov 26 07:11:26 crc kubenswrapper[4940]: W1126 07:11:26.545024 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b2e7f46_8ad4_4361_8e95_76aa1e091665.slice/crio-2e783f03681bc50132bd769a7b2ce2a9af99778dd8a8c85408d4c47de7a3691d WatchSource:0}: Error finding container 2e783f03681bc50132bd769a7b2ce2a9af99778dd8a8c85408d4c47de7a3691d: Status 404 returned error can't find the container with id 2e783f03681bc50132bd769a7b2ce2a9af99778dd8a8c85408d4c47de7a3691d
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.697212 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.704629 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw"]
Nov 26 07:11:26 crc kubenswrapper[4940]: W1126 07:11:26.717335 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c068c7e_f13c_45ca_b161_e590eefdd568.slice/crio-fc31a5c69125f9cb0f363d75ebda9d75ab3f5aefd2aca2a2800da0b3fe87e0d8 WatchSource:0}: Error finding container fc31a5c69125f9cb0f363d75ebda9d75ab3f5aefd2aca2a2800da0b3fe87e0d8: Status 404 returned error can't find the container with id fc31a5c69125f9cb0f363d75ebda9d75ab3f5aefd2aca2a2800da0b3fe87e0d8
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.725474 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.758657 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.790724 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj"]
Nov 26 07:11:26 crc kubenswrapper[4940]: W1126 07:11:26.796290 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53456206_67c0_4503_b72f_909a3ec07b2a.slice/crio-f0a69a5d68ec661eed3960a30da1d17b7d18f10ab0cddb0e88731ea403b0b574 WatchSource:0}: Error finding container f0a69a5d68ec661eed3960a30da1d17b7d18f10ab0cddb0e88731ea403b0b574: Status 404 returned error can't find the container with id f0a69a5d68ec661eed3960a30da1d17b7d18f10ab0cddb0e88731ea403b0b574
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.803327 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf"]
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.818857 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6fvxf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-qsrc5_openstack-operators(68cd61b8-efae-4aef-bd7a-3e90201b5809): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.818900 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5"]
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.820884 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6fvxf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-qsrc5_openstack-operators(68cd61b8-efae-4aef-bd7a-3e90201b5809): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.821157 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7qgv4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-zrxd8_openstack-operators(e8c9bb46-a618-437a-914b-6cb9c1ede58c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.822319 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" podUID="e8c9bb46-a618-437a-914b-6cb9c1ede58c"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.822341 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" podUID="68cd61b8-efae-4aef-bd7a-3e90201b5809"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.831509 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xxwcl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-nwpjs_openstack-operators(5ac0ef91-42dc-4bed-b5bc-4c668b3249cc): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.832011 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs"]
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.836938 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xxwcl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-nwpjs_openstack-operators(5ac0ef91-42dc-4bed-b5bc-4c668b3249cc): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.839580 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" podUID="5ac0ef91-42dc-4bed-b5bc-4c668b3249cc"
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.840855 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.841989 4940 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.842106 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert podName:869e95c8-91e8-4b5a-8eda-35c045ee8cbe nodeName:}" failed. No retries permitted until 2025-11-26 07:11:28.842084201 +0000 UTC m=+990.362225860 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" (UID: "869e95c8-91e8-4b5a-8eda-35c045ee8cbe") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.843854 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.852008 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.926223 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t"]
Nov 26 07:11:26 crc kubenswrapper[4940]: I1126 07:11:26.934028 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88"]
Nov 26 07:11:26 crc kubenswrapper[4940]: W1126 07:11:26.945051 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7fcb4d96_f7a7_4ead_a820_db2eb2785a87.slice/crio-8cb7f4c2cb05541c4df16e408e8ae18e97e7646b81302394908278a190d2691d WatchSource:0}: Error finding container 8cb7f4c2cb05541c4df16e408e8ae18e97e7646b81302394908278a190d2691d: Status 404 returned error can't find the container with id 8cb7f4c2cb05541c4df16e408e8ae18e97e7646b81302394908278a190d2691d
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.947998 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-88c2j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-jgp88_openstack-operators(7fcb4d96-f7a7-4ead-a820-db2eb2785a87): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.950190 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-88c2j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-jgp88_openstack-operators(7fcb4d96-f7a7-4ead-a820-db2eb2785a87): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 26 07:11:26 crc kubenswrapper[4940]: E1126 07:11:26.951494 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" podUID="7fcb4d96-f7a7-4ead-a820-db2eb2785a87"
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.146027 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.146157 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:27 crc kubenswrapper[4940]: E1126 07:11:27.146360 4940 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 07:11:27 crc kubenswrapper[4940]: E1126 07:11:27.146421 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:29.146404262 +0000 UTC m=+990.666545881 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "metrics-server-cert" not found
Nov 26 07:11:27 crc kubenswrapper[4940]: E1126 07:11:27.146732 4940 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 07:11:27 crc kubenswrapper[4940]: E1126 07:11:27.146779 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:29.146765804 +0000 UTC m=+990.666907423 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "webhook-server-cert" not found
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.320906 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" event={"ID":"15d11cf9-51e8-4f1e-880e-86d9bba60224","Type":"ContainerStarted","Data":"53d98e8d9f6d0630520f73fb8e47b04c513b562544dad87e84f25a261cd7f8db"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.322271 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" event={"ID":"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc","Type":"ContainerStarted","Data":"c4418d4ec2eb512cd8d60734cea86986909e1ed914a9658387e6b4c7fc06caf1"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.324134 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" event={"ID":"5c068c7e-f13c-45ca-b161-e590eefdd568","Type":"ContainerStarted","Data":"fc31a5c69125f9cb0f363d75ebda9d75ab3f5aefd2aca2a2800da0b3fe87e0d8"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.325922 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" event={"ID":"68cd61b8-efae-4aef-bd7a-3e90201b5809","Type":"ContainerStarted","Data":"d4cf4fe949533105cb00a9f5994af51158b4e8dfd36b0bdf257f92a5d3243dd4"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.327373 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" event={"ID":"7fcb4d96-f7a7-4ead-a820-db2eb2785a87","Type":"ContainerStarted","Data":"8cb7f4c2cb05541c4df16e408e8ae18e97e7646b81302394908278a190d2691d"}
Nov 26 07:11:27 crc kubenswrapper[4940]: E1126 07:11:27.329333 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" podUID="68cd61b8-efae-4aef-bd7a-3e90201b5809"
Nov 26 07:11:27 crc kubenswrapper[4940]: E1126 07:11:27.329878 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" podUID="7fcb4d96-f7a7-4ead-a820-db2eb2785a87"
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.330374 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" event={"ID":"0eca9bcc-5909-48e2-927e-b059359977d5","Type":"ContainerStarted","Data":"7d6ca0955a9d4d7e82c9aa6f9506cf9ef7ce5ea9d794b17e8e0c81ab8013296b"}
Nov 26 07:11:27 crc kubenswrapper[4940]: E1126 07:11:27.331690 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" podUID="5ac0ef91-42dc-4bed-b5bc-4c668b3249cc"
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.333471 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" event={"ID":"e46c7b1d-e02f-4807-a650-1038eba64162","Type":"ContainerStarted","Data":"b5a4c46226633aba3a965a23b9827afa863078e0904586cd43a9673cc6c3ceaa"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.337568 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" event={"ID":"e8c9bb46-a618-437a-914b-6cb9c1ede58c","Type":"ContainerStarted","Data":"19c3dd6640e572f6186da1d8480d8925cf5e7556893073310f06e3aabd4c76de"}
Nov 26 07:11:27 crc kubenswrapper[4940]: E1126 07:11:27.339522 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" podUID="e8c9bb46-a618-437a-914b-6cb9c1ede58c"
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.340815 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" event={"ID":"7756325b-5cc4-4eb6-ae14-5f71924c3413","Type":"ContainerStarted","Data":"a38cfa4c9e9ab9766614561bc0b5291bad07cc7e033d8a1e220eda65fc9fc6c2"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.345961 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" event={"ID":"67bde2c7-9e64-469e-b400-071b32f065da","Type":"ContainerStarted","Data":"49afbd3cb93f27289cf68faf01737776680ccd14fe7bbe183979b385cff5ed41"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.347878 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" event={"ID":"53456206-67c0-4503-b72f-909a3ec07b2a","Type":"ContainerStarted","Data":"f0a69a5d68ec661eed3960a30da1d17b7d18f10ab0cddb0e88731ea403b0b574"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.349305 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" event={"ID":"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4","Type":"ContainerStarted","Data":"7745969e3a12afcdb468467862dbe95e9b1a6272b1ae688d94ffdfef852aec28"}
Nov 26 07:11:27 crc kubenswrapper[4940]: I1126 07:11:27.351617 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" event={"ID":"2b2e7f46-8ad4-4361-8e95-76aa1e091665","Type":"ContainerStarted","Data":"2e783f03681bc50132bd769a7b2ce2a9af99778dd8a8c85408d4c47de7a3691d"}
Nov 26 07:11:28 crc kubenswrapper[4940]: E1126 07:11:28.369117 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" podUID="e8c9bb46-a618-437a-914b-6cb9c1ede58c"
Nov 26 07:11:28 crc kubenswrapper[4940]: E1126 07:11:28.369947 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" podUID="7fcb4d96-f7a7-4ead-a820-db2eb2785a87"
Nov 26 07:11:28 crc kubenswrapper[4940]: E1126 07:11:28.370085 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" podUID="5ac0ef91-42dc-4bed-b5bc-4c668b3249cc"
Nov 26 07:11:28 crc kubenswrapper[4940]: E1126 07:11:28.370287 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" podUID="68cd61b8-efae-4aef-bd7a-3e90201b5809"
Nov 26 07:11:28 crc kubenswrapper[4940]: I1126 07:11:28.587512 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc"
Nov 26 07:11:28 crc kubenswrapper[4940]: E1126 07:11:28.587732 4940 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 07:11:28 crc kubenswrapper[4940]: E1126 07:11:28.587782 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert podName:b805b33b-94ee-4037-907b-339573471ddb nodeName:}" failed. No retries permitted until 2025-11-26 07:11:32.587767716 +0000 UTC m=+994.107909335 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert") pod "infra-operator-controller-manager-57548d458d-txjbc" (UID: "b805b33b-94ee-4037-907b-339573471ddb") : secret "infra-operator-webhook-server-cert" not found
Nov 26 07:11:28 crc kubenswrapper[4940]: I1126 07:11:28.890754 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"
Nov 26 07:11:28 crc kubenswrapper[4940]: E1126 07:11:28.891460 4940 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:11:28 crc kubenswrapper[4940]: E1126 07:11:28.891513 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert podName:869e95c8-91e8-4b5a-8eda-35c045ee8cbe nodeName:}" failed. No retries permitted until 2025-11-26 07:11:32.891498159 +0000 UTC m=+994.411639778 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" (UID: "869e95c8-91e8-4b5a-8eda-35c045ee8cbe") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:11:29 crc kubenswrapper[4940]: I1126 07:11:29.203454 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:29 crc kubenswrapper[4940]: I1126 07:11:29.203538 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:29 crc kubenswrapper[4940]: E1126 07:11:29.203729 4940 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 07:11:29 crc kubenswrapper[4940]: E1126 07:11:29.203774 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:33.203760552 +0000 UTC m=+994.723902171 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "metrics-server-cert" not found
Nov 26 07:11:29 crc kubenswrapper[4940]: E1126 07:11:29.203818 4940 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 07:11:29 crc kubenswrapper[4940]: E1126 07:11:29.203839 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:33.203833765 +0000 UTC m=+994.723975384 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "webhook-server-cert" not found
Nov 26 07:11:32 crc kubenswrapper[4940]: I1126 07:11:32.667499 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc"
Nov 26 07:11:32 crc kubenswrapper[4940]: E1126 07:11:32.667725 4940 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 07:11:32 crc kubenswrapper[4940]: E1126 07:11:32.668587 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert podName:b805b33b-94ee-4037-907b-339573471ddb nodeName:}" failed. No retries permitted until 2025-11-26 07:11:40.66856269 +0000 UTC m=+1002.188704299 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert") pod "infra-operator-controller-manager-57548d458d-txjbc" (UID: "b805b33b-94ee-4037-907b-339573471ddb") : secret "infra-operator-webhook-server-cert" not found
Nov 26 07:11:32 crc kubenswrapper[4940]: I1126 07:11:32.972288 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"
Nov 26 07:11:32 crc kubenswrapper[4940]: E1126 07:11:32.972460 4940 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:11:32 crc kubenswrapper[4940]: E1126 07:11:32.972513 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert podName:869e95c8-91e8-4b5a-8eda-35c045ee8cbe nodeName:}" failed. No retries permitted until 2025-11-26 07:11:40.97249561 +0000 UTC m=+1002.492637229 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" (UID: "869e95c8-91e8-4b5a-8eda-35c045ee8cbe") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:11:33 crc kubenswrapper[4940]: I1126 07:11:33.277001 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:33 crc kubenswrapper[4940]: I1126 07:11:33.277157 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:33 crc kubenswrapper[4940]: E1126 07:11:33.277217 4940 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 07:11:33 crc kubenswrapper[4940]: E1126 07:11:33.277247 4940 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 07:11:33 crc kubenswrapper[4940]: E1126 07:11:33.277271 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:41.277257355 +0000 UTC m=+1002.797398974 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "webhook-server-cert" not found
Nov 26 07:11:33 crc kubenswrapper[4940]: E1126 07:11:33.277285 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:41.277279906 +0000 UTC m=+1002.797421525 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "metrics-server-cert" not found
Nov 26 07:11:40 crc kubenswrapper[4940]: E1126 07:11:40.213235 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423"
Nov 26 07:11:40 crc kubenswrapper[4940]: E1126 07:11:40.213823 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dfmh8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-2ddr6_openstack-operators(7756325b-5cc4-4eb6-ae14-5f71924c3413): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:11:40 crc kubenswrapper[4940]: I1126 07:11:40.690908 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc"
Nov 26 07:11:40 crc kubenswrapper[4940]: E1126 07:11:40.691116 4940 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 07:11:40 crc kubenswrapper[4940]: E1126 07:11:40.691327 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert podName:b805b33b-94ee-4037-907b-339573471ddb nodeName:}" failed. No retries permitted until 2025-11-26 07:11:56.691312466 +0000 UTC m=+1018.211454085 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert") pod "infra-operator-controller-manager-57548d458d-txjbc" (UID: "b805b33b-94ee-4037-907b-339573471ddb") : secret "infra-operator-webhook-server-cert" not found
Nov 26 07:11:40 crc kubenswrapper[4940]: I1126 07:11:40.995490 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"
Nov 26 07:11:41 crc kubenswrapper[4940]: I1126 07:11:41.012341 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/869e95c8-91e8-4b5a-8eda-35c045ee8cbe-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp\" (UID: \"869e95c8-91e8-4b5a-8eda-35c045ee8cbe\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"
Nov 26 07:11:41 crc kubenswrapper[4940]: E1126 07:11:41.035256 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7"
Nov 26 07:11:41 crc kubenswrapper[4940]: E1126 07:11:41.035472 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-948fs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-lzctw_openstack-operators(5c068c7e-f13c-45ca-b161-e590eefdd568): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:11:41 crc kubenswrapper[4940]: I1126 07:11:41.299950 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:41 crc kubenswrapper[4940]: I1126 07:11:41.300103 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:41 crc kubenswrapper[4940]: E1126 07:11:41.300266 4940 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 07:11:41 crc kubenswrapper[4940]: E1126 07:11:41.300437 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs podName:df395369-43ff-4cd2-af6e-60a9a96a4d66 nodeName:}" failed. No retries permitted until 2025-11-26 07:11:57.300364992 +0000 UTC m=+1018.820506621 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-7dkrp" (UID: "df395369-43ff-4cd2-af6e-60a9a96a4d66") : secret "metrics-server-cert" not found
Nov 26 07:11:41 crc kubenswrapper[4940]: I1126 07:11:41.303402 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:11:41 crc kubenswrapper[4940]: I1126 07:11:41.308308 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"
Nov 26 07:11:41 crc kubenswrapper[4940]: E1126 07:11:41.733654 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c"
Nov 26 07:11:41 crc kubenswrapper[4940]: E1126 07:11:41.733935 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mx44x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-5p8hf_openstack-operators(15d11cf9-51e8-4f1e-880e-86d9bba60224): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:11:42 crc kubenswrapper[4940]: E1126 07:11:42.373209 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7"
Nov 26 07:11:42 crc kubenswrapper[4940]: E1126 07:11:42.373775 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-69l59,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-ht6zr_openstack-operators(0eca9bcc-5909-48e2-927e-b059359977d5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:11:51 crc kubenswrapper[4940]: E1126 07:11:51.060324 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711" Nov 26 07:11:51 crc kubenswrapper[4940]: E1126 07:11:51.061160 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lmllf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-xbbg2_openstack-operators(e4255a56-ed59-4cad-90a4-91abb39144d4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:11:51 crc kubenswrapper[4940]: I1126 07:11:51.481729 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp"] Nov 26 07:11:52 crc kubenswrapper[4940]: I1126 07:11:52.547519 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" event={"ID":"869e95c8-91e8-4b5a-8eda-35c045ee8cbe","Type":"ContainerStarted","Data":"d37ae220f7e8f7abe6695fba5199893b8f60af1f85c22be7e24b2135050c77c1"} Nov 26 07:11:53 crc kubenswrapper[4940]: I1126 07:11:53.554612 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" event={"ID":"53456206-67c0-4503-b72f-909a3ec07b2a","Type":"ContainerStarted","Data":"5bfefbe026ea6a73cb5c1cb3b4ff2a611c50c9d151f51528495ed8b45b0e52e4"} Nov 26 07:11:55 crc kubenswrapper[4940]: I1126 07:11:55.575853 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" event={"ID":"86296495-65bc-46a3-a775-621a7bf1745f","Type":"ContainerStarted","Data":"f3b11de0cba5fabe5e4f8b771ef0ba514601283f5a893290ce592c51a22c14c2"} Nov 26 07:11:55 crc kubenswrapper[4940]: I1126 07:11:55.594284 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" event={"ID":"9527c833-8bce-440b-b4e5-ca0a08ef7d28","Type":"ContainerStarted","Data":"f6a59ff24ca24b60c8df863f4660f2261fca6f6898f69caf7a39cd40ea7c334b"} Nov 26 07:11:55 crc kubenswrapper[4940]: I1126 07:11:55.603919 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" event={"ID":"e46c7b1d-e02f-4807-a650-1038eba64162","Type":"ContainerStarted","Data":"113b8f0f73fd0bfdb692f5aca68c205f381d14458d5ac47d9949ce94e3318770"} Nov 26 07:11:55 crc kubenswrapper[4940]: I1126 07:11:55.611930 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" event={"ID":"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4","Type":"ContainerStarted","Data":"770f0e888ed9d736e24b973a0db645ac5c63d4728e3246d8f6213419e6924c37"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.631711 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" 
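
The MountVolume.SetUp failures above (infra-operator-webhook-server-cert, metrics-server-cert) show how the kubelet handles a Secret-backed volume whose Secret does not exist yet: each failed attempt schedules the next retry further out (durationBeforeRetry has grown to 16s here), and the pending mount succeeds on a later reconcile pass once the Secret appears, as the MountVolume.SetUp succeeded entries at 07:11:56-07:11:57 confirm. A minimal client-go sketch of the equivalent existence check, assuming in-cluster credentials; the program is illustrative, not kubelet code:

    package main

    import (
        "context"
        "fmt"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)
        // Namespace and name taken from the failing mount above.
        _, err = cs.CoreV1().Secrets("openstack-operators").Get(
            context.Background(), "infra-operator-webhook-server-cert", metav1.GetOptions{})
        switch {
        case apierrors.IsNotFound(err):
            fmt.Println("secret missing: kubelet keeps retrying the mount with backoff")
        case err != nil:
            panic(err)
        default:
            fmt.Println("secret present: the pending MountVolume.SetUp can succeed")
        }
    }
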
event={"ID":"7fcb4d96-f7a7-4ead-a820-db2eb2785a87","Type":"ContainerStarted","Data":"dd335cbc7ae6c9b481eedac74682490a610f90013f79eb0a568075def00a2b2a"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.640846 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" event={"ID":"327827f9-8ca3-4d2e-8478-ace9eb784b21","Type":"ContainerStarted","Data":"a18a90bdf99dc5707aa80d198343866fac1bb9413286ee9c066621e591d80395"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.647209 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" event={"ID":"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5","Type":"ContainerStarted","Data":"2442ed10e98338fffcf8e1b946ea1b87654a5e145291979c0df657ceb62cb316"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.649939 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" event={"ID":"2b2e7f46-8ad4-4361-8e95-76aa1e091665","Type":"ContainerStarted","Data":"6be974cfc983b06d8579e128e6bb85eceb785f44210e86cd7c82d9cdeed56b9f"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.651910 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" event={"ID":"68cd61b8-efae-4aef-bd7a-3e90201b5809","Type":"ContainerStarted","Data":"d36bc06cba1e5be4028d78492a3e4374456c16acd82e71d9f36c6c42021bfad6"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.656848 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" event={"ID":"7ce6057b-0d67-48fc-9d34-b6574eda6978","Type":"ContainerStarted","Data":"f537c33fd1350f4fc54f21bbf168f336ab2ca52bb8da5666d8584afe96ecb173"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.660592 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" event={"ID":"67bde2c7-9e64-469e-b400-071b32f065da","Type":"ContainerStarted","Data":"9db78c16f5b35cf049201d82712db21f533c70a534a80df60cc4689a577b7131"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.663716 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" event={"ID":"b53f82af-849a-47b4-a878-676055ad11ef","Type":"ContainerStarted","Data":"179f7cba17ee6f72630f8e9d0839668d4b1fc29ece555b99ba134421ede5478d"} Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.757951 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.777331 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b805b33b-94ee-4037-907b-339573471ddb-cert\") pod \"infra-operator-controller-manager-57548d458d-txjbc\" (UID: \"b805b33b-94ee-4037-907b-339573471ddb\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:56 crc kubenswrapper[4940]: I1126 07:11:56.829450 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:11:57 crc kubenswrapper[4940]: I1126 07:11:57.371979 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:57 crc kubenswrapper[4940]: I1126 07:11:57.382976 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df395369-43ff-4cd2-af6e-60a9a96a4d66-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-7dkrp\" (UID: \"df395369-43ff-4cd2-af6e-60a9a96a4d66\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:57 crc kubenswrapper[4940]: I1126 07:11:57.576447 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:11:57 crc kubenswrapper[4940]: I1126 07:11:57.671274 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" event={"ID":"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc","Type":"ContainerStarted","Data":"4a27642782b2e952a0f5370d55ba11f8ec629e7e6052c86858b54c7ee7a85af9"} Nov 26 07:11:58 crc kubenswrapper[4940]: I1126 07:11:58.708360 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" event={"ID":"869e95c8-91e8-4b5a-8eda-35c045ee8cbe","Type":"ContainerStarted","Data":"f9bb41181d2edd1504186b07395945c6d729e7b15f2b5b788d649bea02cff690"} Nov 26 07:11:58 crc kubenswrapper[4940]: I1126 07:11:58.717822 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:11:58 crc kubenswrapper[4940]: I1126 07:11:58.717846 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" event={"ID":"7fcb4d96-f7a7-4ead-a820-db2eb2785a87","Type":"ContainerStarted","Data":"51e702c0172e57a9efaae9508ab3293dbc5e1e1f87770dc435d3c10eb1b762eb"} Nov 26 07:11:58 crc kubenswrapper[4940]: I1126 07:11:58.717896 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" event={"ID":"e8c9bb46-a618-437a-914b-6cb9c1ede58c","Type":"ContainerStarted","Data":"25890392692f29e5a659d515eb3bd1d708efd5f84abcee39e0bd480c92fb5a26"} Nov 26 07:11:58 crc kubenswrapper[4940]: I1126 07:11:58.772502 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" podStartSLOduration=2.36450866 podStartE2EDuration="33.772484208s" podCreationTimestamp="2025-11-26 07:11:25 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.947861806 +0000 UTC m=+988.468003425" lastFinishedPulling="2025-11-26 07:11:58.355837364 +0000 UTC m=+1019.875978973" observedRunningTime="2025-11-26 07:11:58.747726943 +0000 UTC m=+1020.267868552" watchObservedRunningTime="2025-11-26 07:11:58.772484208 +0000 UTC m=+1020.292625827" Nov 26 07:11:58 crc kubenswrapper[4940]: I1126 07:11:58.798213 4940 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"] Nov 26 07:11:58 crc kubenswrapper[4940]: I1126 07:11:58.799895 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" podStartSLOduration=5.281549054 podStartE2EDuration="33.799874446s" podCreationTimestamp="2025-11-26 07:11:25 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.82093445 +0000 UTC m=+988.341076069" lastFinishedPulling="2025-11-26 07:11:55.339259842 +0000 UTC m=+1016.859401461" observedRunningTime="2025-11-26 07:11:58.767087417 +0000 UTC m=+1020.287229036" watchObservedRunningTime="2025-11-26 07:11:58.799874446 +0000 UTC m=+1020.320016065" Nov 26 07:11:58 crc kubenswrapper[4940]: E1126 07:11:58.844565 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" podUID="7756325b-5cc4-4eb6-ae14-5f71924c3413" Nov 26 07:11:58 crc kubenswrapper[4940]: I1126 07:11:58.867670 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-txjbc"] Nov 26 07:11:58 crc kubenswrapper[4940]: E1126 07:11:58.893599 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" Nov 26 07:11:59 crc kubenswrapper[4940]: E1126 07:11:59.196358 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.738818 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" event={"ID":"15d11cf9-51e8-4f1e-880e-86d9bba60224","Type":"ContainerStarted","Data":"dc1af5199ceafe386636f1f7765d338328dc1c1fe0c954102edc130a652fc198"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.758405 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" event={"ID":"7ce6057b-0d67-48fc-9d34-b6574eda6978","Type":"ContainerStarted","Data":"18ca8119d0bdb4f31d7046b81937423e0e0d1e5077c3323d045737c2d1a97608"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.759219 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:11:59 crc kubenswrapper[4940]: E1126 07:11:59.770504 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" podUID="5c068c7e-f13c-45ca-b161-e590eefdd568" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.776147 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" event={"ID":"7756325b-5cc4-4eb6-ae14-5f71924c3413","Type":"ContainerStarted","Data":"5420bcd5c18824cc881d2abc3308c1b1305f8927e28d2f1576d64f10c017f86d"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.781702 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" event={"ID":"b805b33b-94ee-4037-907b-339573471ddb","Type":"ContainerStarted","Data":"2bca5678e7df1830fc55c86d5ba8379eb89e3ac3cf0f141251daa085ffa8a6b6"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.800623 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" podStartSLOduration=2.5629577980000002 podStartE2EDuration="35.800606635s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:25.665751643 +0000 UTC m=+987.185893262" lastFinishedPulling="2025-11-26 07:11:58.90340048 +0000 UTC m=+1020.423542099" observedRunningTime="2025-11-26 07:11:59.791231988 +0000 UTC m=+1021.311373607" watchObservedRunningTime="2025-11-26 07:11:59.800606635 +0000 UTC m=+1021.320748254" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.800665 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" event={"ID":"b53f82af-849a-47b4-a878-676055ad11ef","Type":"ContainerStarted","Data":"077c4680656993fcc6adabd3a9fab6dd267d7d7657e3405cad2b3b2904bbd50f"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.801468 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.814362 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" event={"ID":"0eca9bcc-5909-48e2-927e-b059359977d5","Type":"ContainerStarted","Data":"4860f3cb8ea9d73f4ad5e6b51521580cde4432eeb5dd749728b67e1c7d67f6a5"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.836193 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" event={"ID":"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5","Type":"ContainerStarted","Data":"e4e79888b0a0d92f35b5b606bfdbe9a5df87c66effa460b372afd9d191d18278"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.836764 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.842354 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" event={"ID":"86296495-65bc-46a3-a775-621a7bf1745f","Type":"ContainerStarted","Data":"6f1cf884225b0f13b2275339e3c32e7929d7e4b053de09d0e6a12a3c4cf0e710"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.842870 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.844971 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" podStartSLOduration=3.318721667 podStartE2EDuration="35.844955322s" 
podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:25.925598034 +0000 UTC m=+987.445739653" lastFinishedPulling="2025-11-26 07:11:58.451831689 +0000 UTC m=+1019.971973308" observedRunningTime="2025-11-26 07:11:59.842608128 +0000 UTC m=+1021.362749747" watchObservedRunningTime="2025-11-26 07:11:59.844955322 +0000 UTC m=+1021.365096931" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.845394 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" event={"ID":"869e95c8-91e8-4b5a-8eda-35c045ee8cbe","Type":"ContainerStarted","Data":"f0655e37d1878a7a1baa6d1eb8ddbb404da485e274a28385ebb1fedccbec2687"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.845916 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.871114 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" podStartSLOduration=3.366112841 podStartE2EDuration="35.871095331s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.208011151 +0000 UTC m=+987.728152770" lastFinishedPulling="2025-11-26 07:11:58.712993641 +0000 UTC m=+1020.233135260" observedRunningTime="2025-11-26 07:11:59.867506577 +0000 UTC m=+1021.387648206" watchObservedRunningTime="2025-11-26 07:11:59.871095331 +0000 UTC m=+1021.391236950" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.914801 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" event={"ID":"53456206-67c0-4503-b72f-909a3ec07b2a","Type":"ContainerStarted","Data":"2052063a893689128d0af40fb7103181d4c124d42f5dd644550f4768e07f4a09"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.917086 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.934600 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.951318 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" podStartSLOduration=2.806044837 podStartE2EDuration="35.951294474s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:25.925214022 +0000 UTC m=+987.445355641" lastFinishedPulling="2025-11-26 07:11:59.070463659 +0000 UTC m=+1020.590605278" observedRunningTime="2025-11-26 07:11:59.942684231 +0000 UTC m=+1021.462825850" watchObservedRunningTime="2025-11-26 07:11:59.951294474 +0000 UTC m=+1021.471436093" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.974016 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" podStartSLOduration=3.331369714 podStartE2EDuration="34.974001865s" podCreationTimestamp="2025-11-26 07:11:25 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.816432387 +0000 UTC m=+988.336574006" lastFinishedPulling="2025-11-26 07:11:58.459064538 +0000 UTC m=+1019.979206157" 
observedRunningTime="2025-11-26 07:11:59.964915636 +0000 UTC m=+1021.485057255" watchObservedRunningTime="2025-11-26 07:11:59.974001865 +0000 UTC m=+1021.494143484" Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.991545 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" event={"ID":"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4","Type":"ContainerStarted","Data":"37afdbb11ca2175e6f957c96013eda9dc0a5439bbe95b87e364bfba23df04d52"} Nov 26 07:11:59 crc kubenswrapper[4940]: I1126 07:11:59.992205 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:12:00 crc kubenswrapper[4940]: E1126 07:12:00.008661 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" podUID="e4255a56-ed59-4cad-90a4-91abb39144d4" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.013403 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" event={"ID":"2b2e7f46-8ad4-4361-8e95-76aa1e091665","Type":"ContainerStarted","Data":"deebfd1f39340d1f1fa75f0de7209366013851ec01129e865a88e25c09df0219"} Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.014868 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.016611 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.018394 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" event={"ID":"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc","Type":"ContainerStarted","Data":"5eeebc5dc5b001b01fa2ad87205c1ab064dd45c8b629b6f45ea49d43ed6ecf6a"} Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.019273 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.025613 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" event={"ID":"67bde2c7-9e64-469e-b400-071b32f065da","Type":"ContainerStarted","Data":"7e59dba38d7a15d697fc21fede2ff625d5b694185b18a7d2d32257b701a5b6b0"} Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.027190 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.043949 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" event={"ID":"327827f9-8ca3-4d2e-8478-ace9eb784b21","Type":"ContainerStarted","Data":"53ae549803626b5b7fa5a59bfb2d8f9a7b581da64ef945b742448f3e61a91a60"} Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.045193 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 
26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.056906 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.057031 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.057071 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" event={"ID":"df395369-43ff-4cd2-af6e-60a9a96a4d66","Type":"ContainerStarted","Data":"e888cf5b6033f5ecf003b8fbe88a0b40d5ac748a81f955d38898d4bbeffd08a0"} Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.057098 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" event={"ID":"df395369-43ff-4cd2-af6e-60a9a96a4d66","Type":"ContainerStarted","Data":"751094daedc0b26100239e33f43407ab1cb4c03e85d3a5cfd9c60e4633e2eda6"} Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.057745 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.058799 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" podStartSLOduration=3.6735185809999997 podStartE2EDuration="36.058783814s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.553911382 +0000 UTC m=+988.074053001" lastFinishedPulling="2025-11-26 07:11:58.939176615 +0000 UTC m=+1020.459318234" observedRunningTime="2025-11-26 07:12:00.048532068 +0000 UTC m=+1021.568673687" watchObservedRunningTime="2025-11-26 07:12:00.058783814 +0000 UTC m=+1021.578925433" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.059206 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" podStartSLOduration=32.54992828 podStartE2EDuration="36.059202837s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:51.847541761 +0000 UTC m=+1013.367683380" lastFinishedPulling="2025-11-26 07:11:55.356816328 +0000 UTC m=+1016.876957937" observedRunningTime="2025-11-26 07:12:00.024835717 +0000 UTC m=+1021.544977346" watchObservedRunningTime="2025-11-26 07:12:00.059202837 +0000 UTC m=+1021.579344456" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.089249 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" podStartSLOduration=4.277037291 podStartE2EDuration="36.089223999s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.83135382 +0000 UTC m=+988.351495439" lastFinishedPulling="2025-11-26 07:11:58.643540528 +0000 UTC m=+1020.163682147" observedRunningTime="2025-11-26 07:12:00.078262712 +0000 UTC m=+1021.598404351" watchObservedRunningTime="2025-11-26 07:12:00.089223999 +0000 UTC m=+1021.609365628" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.139258 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" 
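
The SyncLoop (probe) transitions above, from status="" to status="ready", are driven by the readiness probe recorded in the container specs dumped in the earlier UnhandledError entries: HTTP GET /readyz on port 8081, 5s initial delay, 10s period, failure threshold 3. The same probe rebuilt as Go API types, for readability only; corev1 is k8s.io/api/core/v1 and intstr is k8s.io/apimachinery/pkg/util/intstr:

    // Readiness probe as logged in the operator container specs above.
    readiness := &corev1.Probe{
        ProbeHandler: corev1.ProbeHandler{
            HTTPGet: &corev1.HTTPGetAction{
                Path:   "/readyz",
                Port:   intstr.FromInt(8081),
                Scheme: corev1.URISchemeHTTP,
            },
        },
        InitialDelaySeconds: 5, // matches the logged spec
        TimeoutSeconds:      1,
        PeriodSeconds:       10,
        SuccessThreshold:    1,
        FailureThreshold:    3,
    }
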
podStartSLOduration=3.385674981 podStartE2EDuration="36.139234925s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.167646261 +0000 UTC m=+987.687787880" lastFinishedPulling="2025-11-26 07:11:58.921206195 +0000 UTC m=+1020.441347824" observedRunningTime="2025-11-26 07:12:00.134547877 +0000 UTC m=+1021.654689496" watchObservedRunningTime="2025-11-26 07:12:00.139234925 +0000 UTC m=+1021.659376544" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.158411 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" podStartSLOduration=4.103042112 podStartE2EDuration="36.158392693s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.793141408 +0000 UTC m=+988.313283027" lastFinishedPulling="2025-11-26 07:11:58.848491989 +0000 UTC m=+1020.368633608" observedRunningTime="2025-11-26 07:12:00.110266346 +0000 UTC m=+1021.630407965" watchObservedRunningTime="2025-11-26 07:12:00.158392693 +0000 UTC m=+1021.678534312" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.178673 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" podStartSLOduration=3.3237163929999998 podStartE2EDuration="35.178653276s" podCreationTimestamp="2025-11-26 07:11:25 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.925255939 +0000 UTC m=+988.445397558" lastFinishedPulling="2025-11-26 07:11:58.780192822 +0000 UTC m=+1020.300334441" observedRunningTime="2025-11-26 07:12:00.158603749 +0000 UTC m=+1021.678745378" watchObservedRunningTime="2025-11-26 07:12:00.178653276 +0000 UTC m=+1021.698794895" Nov 26 07:12:00 crc kubenswrapper[4940]: I1126 07:12:00.243529 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" podStartSLOduration=35.243510633 podStartE2EDuration="35.243510633s" podCreationTimestamp="2025-11-26 07:11:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:12:00.239369981 +0000 UTC m=+1021.759511610" watchObservedRunningTime="2025-11-26 07:12:00.243510633 +0000 UTC m=+1021.763652262" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.058689 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" event={"ID":"9527c833-8bce-440b-b4e5-ca0a08ef7d28","Type":"ContainerStarted","Data":"5ea31e2775640ff44225de5de73c899b56043919bf512be1149d945f9e9e6e5a"} Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.059330 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.064694 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.065879 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" event={"ID":"5c068c7e-f13c-45ca-b161-e590eefdd568","Type":"ContainerStarted","Data":"fdfd1f07eb7889cca55926c8beb25d4d0d6df23e489b4a34ed6a7d04e91b1d0a"} Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 
07:12:01.076657 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" event={"ID":"68cd61b8-efae-4aef-bd7a-3e90201b5809","Type":"ContainerStarted","Data":"fe996fcc5f7408200e347072c33947d7cada6d62015e6bdb5c067c22daf394bd"} Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.077128 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.080587 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.083978 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" event={"ID":"7756325b-5cc4-4eb6-ae14-5f71924c3413","Type":"ContainerStarted","Data":"66e7036b9edbb973ac2dfbc3f9f84d9976a9419baa0b52a8f311c421d91c0fd1"} Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.084898 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.095573 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" event={"ID":"e46c7b1d-e02f-4807-a650-1038eba64162","Type":"ContainerStarted","Data":"ea24f4925f902df7fcaf20905f1ca8f6587bc67f7ff3a8eb8a886829513dbcee"} Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.096663 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.101209 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.108251 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" event={"ID":"e4255a56-ed59-4cad-90a4-91abb39144d4","Type":"ContainerStarted","Data":"fb594e4f816509042ac44b687b0e17777539617ccfd6c489255665753bc811e3"} Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.111906 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" podStartSLOduration=4.361560202 podStartE2EDuration="37.111882324s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.171127651 +0000 UTC m=+987.691269270" lastFinishedPulling="2025-11-26 07:11:58.921449773 +0000 UTC m=+1020.441591392" observedRunningTime="2025-11-26 07:12:01.102275799 +0000 UTC m=+1022.622417418" watchObservedRunningTime="2025-11-26 07:12:01.111882324 +0000 UTC m=+1022.632023943" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.130795 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" podStartSLOduration=3.61234054 podStartE2EDuration="37.130782083s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.751755846 +0000 UTC m=+988.271897465" lastFinishedPulling="2025-11-26 07:12:00.270197389 +0000 UTC 
m=+1021.790339008" observedRunningTime="2025-11-26 07:12:01.130658789 +0000 UTC m=+1022.650800408" watchObservedRunningTime="2025-11-26 07:12:01.130782083 +0000 UTC m=+1022.650923712" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.142849 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" event={"ID":"15d11cf9-51e8-4f1e-880e-86d9bba60224","Type":"ContainerStarted","Data":"c0db42120d7511268a3abd51edbe7223da64ad665cf756192a15c7f7f439fd35"} Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.142891 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.146322 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.149238 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.149295 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.149622 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.149781 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.156782 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.231297 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" podStartSLOduration=4.936986772 podStartE2EDuration="37.23127552s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.816344584 +0000 UTC m=+988.336486203" lastFinishedPulling="2025-11-26 07:11:59.110633332 +0000 UTC m=+1020.630774951" observedRunningTime="2025-11-26 07:12:01.218830015 +0000 UTC m=+1022.738971634" watchObservedRunningTime="2025-11-26 07:12:01.23127552 +0000 UTC m=+1022.751417139" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.292744 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" podStartSLOduration=5.146687892 podStartE2EDuration="37.292722579s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.81872241 +0000 UTC m=+988.338864029" lastFinishedPulling="2025-11-26 07:11:58.964757097 +0000 UTC m=+1020.484898716" observedRunningTime="2025-11-26 07:12:01.289350842 +0000 UTC m=+1022.809492451" watchObservedRunningTime="2025-11-26 07:12:01.292722579 +0000 UTC m=+1022.812864198" Nov 26 07:12:01 crc kubenswrapper[4940]: I1126 07:12:01.496095 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" podStartSLOduration=3.946348863 podStartE2EDuration="37.496078888s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.793555692 +0000 UTC m=+988.313697311" lastFinishedPulling="2025-11-26 07:12:00.343285717 +0000 UTC m=+1021.863427336" observedRunningTime="2025-11-26 07:12:01.495380366 +0000 UTC m=+1023.015521985" watchObservedRunningTime="2025-11-26 07:12:01.496078888 +0000 UTC m=+1023.016220507" Nov 26 07:12:02 crc kubenswrapper[4940]: I1126 07:12:02.156335 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" event={"ID":"0eca9bcc-5909-48e2-927e-b059359977d5","Type":"ContainerStarted","Data":"c66ae8354053587003a3020533f39b00b608d4e75aac5bea19e35aa9963916db"} Nov 26 07:12:02 crc kubenswrapper[4940]: I1126 07:12:02.161577 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:12:02 crc kubenswrapper[4940]: I1126 07:12:02.178271 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" podStartSLOduration=4.480227896 podStartE2EDuration="38.178255014s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.751836679 +0000 UTC m=+988.271978288" lastFinishedPulling="2025-11-26 07:12:00.449863787 +0000 UTC m=+1021.970005406" observedRunningTime="2025-11-26 07:12:02.177255182 +0000 UTC m=+1023.697396801" watchObservedRunningTime="2025-11-26 07:12:02.178255014 +0000 UTC m=+1023.698396633" Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.173837 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.174126 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" event={"ID":"e4255a56-ed59-4cad-90a4-91abb39144d4","Type":"ContainerStarted","Data":"a0797cbb3651c470f7c2c79cd72664b0ecf34cb8800ecb746a8e38816bc274d9"} Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.174149 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.174413 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" event={"ID":"5c068c7e-f13c-45ca-b161-e590eefdd568","Type":"ContainerStarted","Data":"2e2fef8e305481c0f31aa7530decb9f6de8e189b3c14190ad8d0f07303323c0f"} Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.174431 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.174441 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.174448 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" 
event={"ID":"b805b33b-94ee-4037-907b-339573471ddb","Type":"ContainerStarted","Data":"ee702f350372c3cdf15bf63925aa2991ea32956b692cb0c3b352357b489a6eb5"} Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.174458 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" event={"ID":"b805b33b-94ee-4037-907b-339573471ddb","Type":"ContainerStarted","Data":"dae30e891806b3836d03aea29e21b4cac7c6e521846ab376bbffa3ac6c9b6acf"} Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.192134 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" podStartSLOduration=3.214887814 podStartE2EDuration="39.192113429s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.244293031 +0000 UTC m=+987.764434640" lastFinishedPulling="2025-11-26 07:12:02.221518626 +0000 UTC m=+1023.741660255" observedRunningTime="2025-11-26 07:12:03.187143411 +0000 UTC m=+1024.707285080" watchObservedRunningTime="2025-11-26 07:12:03.192113429 +0000 UTC m=+1024.712255068" Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.209814 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" podStartSLOduration=3.7101694910000003 podStartE2EDuration="39.209797389s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:26.726197705 +0000 UTC m=+988.246339324" lastFinishedPulling="2025-11-26 07:12:02.225825603 +0000 UTC m=+1023.745967222" observedRunningTime="2025-11-26 07:12:03.208987133 +0000 UTC m=+1024.729128782" watchObservedRunningTime="2025-11-26 07:12:03.209797389 +0000 UTC m=+1024.729939008" Nov 26 07:12:03 crc kubenswrapper[4940]: I1126 07:12:03.232964 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" podStartSLOduration=35.910815272 podStartE2EDuration="39.232887642s" podCreationTimestamp="2025-11-26 07:11:24 +0000 UTC" firstStartedPulling="2025-11-26 07:11:58.908700299 +0000 UTC m=+1020.428841918" lastFinishedPulling="2025-11-26 07:12:02.230772669 +0000 UTC m=+1023.750914288" observedRunningTime="2025-11-26 07:12:03.230765315 +0000 UTC m=+1024.750907004" watchObservedRunningTime="2025-11-26 07:12:03.232887642 +0000 UTC m=+1024.753029271" Nov 26 07:12:05 crc kubenswrapper[4940]: I1126 07:12:05.385118 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:12:05 crc kubenswrapper[4940]: I1126 07:12:05.426815 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:12:07 crc kubenswrapper[4940]: I1126 07:12:07.590245 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:12:11 crc kubenswrapper[4940]: I1126 07:12:11.315261 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:12:15 crc kubenswrapper[4940]: I1126 07:12:15.297890 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:12:15 crc kubenswrapper[4940]: I1126 07:12:15.356411 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:12:15 crc kubenswrapper[4940]: I1126 07:12:15.380660 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:12:16 crc kubenswrapper[4940]: I1126 07:12:16.835300 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.000355 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-b8wg7"] Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.002081 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.004716 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.005118 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.005361 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-px9pn" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.005511 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.021392 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-b8wg7"] Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.061702 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6584b49599-556wf"] Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.063284 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.065654 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.072884 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-556wf"] Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.093747 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9l7v\" (UniqueName: \"kubernetes.io/projected/8d45d551-e818-4892-b721-b4e869f94589-kube-api-access-r9l7v\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.093796 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-config\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.093834 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xsth\" (UniqueName: \"kubernetes.io/projected/575186d6-7607-4836-b9fa-87767e5a7da1-kube-api-access-8xsth\") pod \"dnsmasq-dns-7bdd77c89-b8wg7\" (UID: \"575186d6-7607-4836-b9fa-87767e5a7da1\") " pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.093865 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/575186d6-7607-4836-b9fa-87767e5a7da1-config\") pod \"dnsmasq-dns-7bdd77c89-b8wg7\" (UID: \"575186d6-7607-4836-b9fa-87767e5a7da1\") " pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.094000 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-dns-svc\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.195742 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/575186d6-7607-4836-b9fa-87767e5a7da1-config\") pod \"dnsmasq-dns-7bdd77c89-b8wg7\" (UID: \"575186d6-7607-4836-b9fa-87767e5a7da1\") " pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.195786 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-dns-svc\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.195869 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9l7v\" (UniqueName: \"kubernetes.io/projected/8d45d551-e818-4892-b721-b4e869f94589-kube-api-access-r9l7v\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " 
pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.195889 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-config\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.195913 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xsth\" (UniqueName: \"kubernetes.io/projected/575186d6-7607-4836-b9fa-87767e5a7da1-kube-api-access-8xsth\") pod \"dnsmasq-dns-7bdd77c89-b8wg7\" (UID: \"575186d6-7607-4836-b9fa-87767e5a7da1\") " pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.196701 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-dns-svc\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.196773 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/575186d6-7607-4836-b9fa-87767e5a7da1-config\") pod \"dnsmasq-dns-7bdd77c89-b8wg7\" (UID: \"575186d6-7607-4836-b9fa-87767e5a7da1\") " pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.197130 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-config\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.214835 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xsth\" (UniqueName: \"kubernetes.io/projected/575186d6-7607-4836-b9fa-87767e5a7da1-kube-api-access-8xsth\") pod \"dnsmasq-dns-7bdd77c89-b8wg7\" (UID: \"575186d6-7607-4836-b9fa-87767e5a7da1\") " pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.215986 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9l7v\" (UniqueName: \"kubernetes.io/projected/8d45d551-e818-4892-b721-b4e869f94589-kube-api-access-r9l7v\") pod \"dnsmasq-dns-6584b49599-556wf\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.328108 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.380895 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.906195 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-b8wg7"] Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.908919 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:12:32 crc kubenswrapper[4940]: I1126 07:12:32.961018 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-556wf"] Nov 26 07:12:32 crc kubenswrapper[4940]: W1126 07:12:32.968483 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d45d551_e818_4892_b721_b4e869f94589.slice/crio-518a766b4a334619ccbe9f590521d02db000869b4969d38dfe8010b581a29dab WatchSource:0}: Error finding container 518a766b4a334619ccbe9f590521d02db000869b4969d38dfe8010b581a29dab: Status 404 returned error can't find the container with id 518a766b4a334619ccbe9f590521d02db000869b4969d38dfe8010b581a29dab Nov 26 07:12:33 crc kubenswrapper[4940]: I1126 07:12:33.409833 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-556wf" event={"ID":"8d45d551-e818-4892-b721-b4e869f94589","Type":"ContainerStarted","Data":"518a766b4a334619ccbe9f590521d02db000869b4969d38dfe8010b581a29dab"} Nov 26 07:12:33 crc kubenswrapper[4940]: I1126 07:12:33.412497 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" event={"ID":"575186d6-7607-4836-b9fa-87767e5a7da1","Type":"ContainerStarted","Data":"76994cf10a6b79789ce44e8f225f77185584895891bd09a1847ae9471be401bb"} Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.239219 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-b8wg7"] Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.265342 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d8746976c-vkf6q"] Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.266672 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.277658 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d8746976c-vkf6q"] Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.326615 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-config\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.326707 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9njx8\" (UniqueName: \"kubernetes.io/projected/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-kube-api-access-9njx8\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.326727 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-dns-svc\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.428846 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-config\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.428942 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9njx8\" (UniqueName: \"kubernetes.io/projected/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-kube-api-access-9njx8\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.428970 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-dns-svc\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.430007 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-dns-svc\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.430020 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-config\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.451621 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9njx8\" (UniqueName: 
\"kubernetes.io/projected/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-kube-api-access-9njx8\") pod \"dnsmasq-dns-6d8746976c-vkf6q\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.545933 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-556wf"] Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.568767 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-2lb2h"] Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.569997 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.579576 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-2lb2h"] Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.602395 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.636378 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-config\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.636442 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-dns-svc\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.636577 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc8ck\" (UniqueName: \"kubernetes.io/projected/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-kube-api-access-cc8ck\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.741893 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc8ck\" (UniqueName: \"kubernetes.io/projected/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-kube-api-access-cc8ck\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.742088 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-config\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.742116 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-dns-svc\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.743501 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-dns-svc\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.744193 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-config\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.769955 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc8ck\" (UniqueName: \"kubernetes.io/projected/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-kube-api-access-cc8ck\") pod \"dnsmasq-dns-6486446b9f-2lb2h\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") " pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:34 crc kubenswrapper[4940]: I1126 07:12:34.897408 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.126068 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d8746976c-vkf6q"] Nov 26 07:12:35 crc kubenswrapper[4940]: W1126 07:12:35.134480 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2bbaafab_23d1_4141_93cd_bdf1f4b3424e.slice/crio-fe29d5a2b8f0b8f88259bbaa75be926931f1573746526c71461211e6b68a850d WatchSource:0}: Error finding container fe29d5a2b8f0b8f88259bbaa75be926931f1573746526c71461211e6b68a850d: Status 404 returned error can't find the container with id fe29d5a2b8f0b8f88259bbaa75be926931f1573746526c71461211e6b68a850d Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.371763 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-2lb2h"] Nov 26 07:12:35 crc kubenswrapper[4940]: W1126 07:12:35.378023 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d0a18f1_b7be_4221_be26_abfe4e69b3ed.slice/crio-d66df7d22de3aaac8b32c3228fb1f302b170e8091f692454e7bfb71ba1607fb8 WatchSource:0}: Error finding container d66df7d22de3aaac8b32c3228fb1f302b170e8091f692454e7bfb71ba1607fb8: Status 404 returned error can't find the container with id d66df7d22de3aaac8b32c3228fb1f302b170e8091f692454e7bfb71ba1607fb8 Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.415472 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.416854 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.420429 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.420505 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.420429 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.420597 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.420705 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.420612 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.421056 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-7xwzp" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.429215 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.462176 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" event={"ID":"1d0a18f1-b7be-4221-be26-abfe4e69b3ed","Type":"ContainerStarted","Data":"d66df7d22de3aaac8b32c3228fb1f302b170e8091f692454e7bfb71ba1607fb8"} Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.465300 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" event={"ID":"2bbaafab-23d1-4141-93cd-bdf1f4b3424e","Type":"ContainerStarted","Data":"fe29d5a2b8f0b8f88259bbaa75be926931f1573746526c71461211e6b68a850d"} Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553571 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/69972749-03ff-48e9-b031-99c33ce86e96-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553620 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qths2\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-kube-api-access-qths2\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553648 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553693 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-confd\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553732 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553769 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553793 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/69972749-03ff-48e9-b031-99c33ce86e96-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553819 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553843 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553922 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.553961 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655571 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655623 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" 
(UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655656 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qths2\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-kube-api-access-qths2\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655680 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/69972749-03ff-48e9-b031-99c33ce86e96-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655702 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655721 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655743 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655768 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655784 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/69972749-03ff-48e9-b031-99c33ce86e96-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655799 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.655820 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.657532 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.657813 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.659653 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.660344 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.663260 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.668179 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.671730 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/69972749-03ff-48e9-b031-99c33ce86e96-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.675334 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/69972749-03ff-48e9-b031-99c33ce86e96-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.675428 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qths2\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-kube-api-access-qths2\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0"
\"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-kube-api-access-qths2\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.678413 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.680303 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.682781 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.683009 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-24475" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.683202 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.683622 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.683777 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.683932 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.684101 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.690668 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.691605 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.708984 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.749958 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.861381 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/11a17576-9a94-4e2d-8915-9d838de09f0b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.861731 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.861757 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct48l\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-kube-api-access-ct48l\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.861840 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.861891 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.861920 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/11a17576-9a94-4e2d-8915-9d838de09f0b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.861953 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.861978 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.862028 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.862091 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.862117 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963686 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963731 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963764 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/11a17576-9a94-4e2d-8915-9d838de09f0b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963800 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963825 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963879 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963930 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963954 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0"
\"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.963984 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/11a17576-9a94-4e2d-8915-9d838de09f0b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.964013 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.964055 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct48l\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-kube-api-access-ct48l\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.968913 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.969728 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.971457 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.971655 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.972109 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.972456 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.975464 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.976099 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.988574 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/11a17576-9a94-4e2d-8915-9d838de09f0b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.988730 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct48l\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-kube-api-access-ct48l\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:35 crc kubenswrapper[4940]: I1126 07:12:35.996839 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/11a17576-9a94-4e2d-8915-9d838de09f0b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:36 crc kubenswrapper[4940]: I1126 07:12:36.011963 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " pod="openstack/rabbitmq-server-0" Nov 26 07:12:36 crc kubenswrapper[4940]: I1126 07:12:36.105322 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:12:36 crc kubenswrapper[4940]: I1126 07:12:36.218322 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:12:36 crc kubenswrapper[4940]: I1126 07:12:36.480701 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"69972749-03ff-48e9-b031-99c33ce86e96","Type":"ContainerStarted","Data":"09a2c45b9089f05df04ff2ec41301d3fa43172c173cb00961cf16e39ce91da21"} Nov 26 07:12:36 crc kubenswrapper[4940]: I1126 07:12:36.712543 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:12:36 crc kubenswrapper[4940]: W1126 07:12:36.748354 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11a17576_9a94_4e2d_8915_9d838de09f0b.slice/crio-97f14370f1831386762b36af2b7bf5f590662167ed2c659b48ed755255c404c4 WatchSource:0}: Error finding container 97f14370f1831386762b36af2b7bf5f590662167ed2c659b48ed755255c404c4: Status 404 returned error can't find the container with id 97f14370f1831386762b36af2b7bf5f590662167ed2c659b48ed755255c404c4 Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.298187 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.299348 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.300937 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.305321 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.305463 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.305487 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-f9gc5" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.334881 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.345948 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.383345 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.383406 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llk2t\" (UniqueName: \"kubernetes.io/projected/a6c56309-82af-4734-a3d4-6c203fd5b23e-kube-api-access-llk2t\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.383437 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.383482 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.383592 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.383628 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-default\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.383661 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.383707 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-kolla-config\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.485410 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.485464 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.485498 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-default\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.485527 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: 
\"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.485561 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-kolla-config\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.485587 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.485607 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llk2t\" (UniqueName: \"kubernetes.io/projected/a6c56309-82af-4734-a3d4-6c203fd5b23e-kube-api-access-llk2t\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.485622 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.487048 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.487258 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.487700 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-kolla-config\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.491704 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.491973 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.492798 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-default\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.492975 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.503080 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"11a17576-9a94-4e2d-8915-9d838de09f0b","Type":"ContainerStarted","Data":"97f14370f1831386762b36af2b7bf5f590662167ed2c659b48ed755255c404c4"} Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.509501 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llk2t\" (UniqueName: \"kubernetes.io/projected/a6c56309-82af-4734-a3d4-6c203fd5b23e-kube-api-access-llk2t\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.513732 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " pod="openstack/openstack-galera-0" Nov 26 07:12:37 crc kubenswrapper[4940]: I1126 07:12:37.632584 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.619749 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.621630 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.626118 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-bnnq8" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.627439 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.627668 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.627828 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.631632 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.708841 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.708915 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.708939 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdrdt\" (UniqueName: \"kubernetes.io/projected/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kube-api-access-tdrdt\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.708959 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.708995 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.709028 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.709204 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.709284 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.810854 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.811144 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdrdt\" (UniqueName: \"kubernetes.io/projected/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kube-api-access-tdrdt\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.811168 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.811204 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.811227 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.811247 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.811271 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.811320 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.811392 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.812004 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.812254 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.813723 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.814618 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.817590 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.817818 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.836803 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdrdt\" (UniqueName: \"kubernetes.io/projected/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kube-api-access-tdrdt\") pod \"openstack-cell1-galera-0\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.851890 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-cell1-galera-0\" (UID: 
\"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.969965 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.974353 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.977590 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.977680 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-tqwxn" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.977813 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.985184 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 07:12:38 crc kubenswrapper[4940]: I1126 07:12:38.985486 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.025698 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-kolla-config\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.025756 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.025778 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6tgd\" (UniqueName: \"kubernetes.io/projected/69f67fa7-ea74-4966-b69c-ab547896057e-kube-api-access-k6tgd\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.025846 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-config-data\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.025893 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.127698 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-config-data\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.127773 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.127818 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-kolla-config\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.127841 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.127864 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6tgd\" (UniqueName: \"kubernetes.io/projected/69f67fa7-ea74-4966-b69c-ab547896057e-kube-api-access-k6tgd\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.128681 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-config-data\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.129230 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-kolla-config\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.142160 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.149437 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6tgd\" (UniqueName: \"kubernetes.io/projected/69f67fa7-ea74-4966-b69c-ab547896057e-kube-api-access-k6tgd\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.150697 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " pod="openstack/memcached-0" Nov 26 07:12:39 crc kubenswrapper[4940]: I1126 07:12:39.303355 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 07:12:40 crc kubenswrapper[4940]: I1126 07:12:40.935820 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:12:40 crc kubenswrapper[4940]: I1126 07:12:40.938702 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:12:40 crc kubenswrapper[4940]: I1126 07:12:40.942414 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-592q7" Nov 26 07:12:40 crc kubenswrapper[4940]: I1126 07:12:40.943249 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:12:41 crc kubenswrapper[4940]: I1126 07:12:41.077070 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bv7p\" (UniqueName: \"kubernetes.io/projected/7204b63f-239d-4f0e-96ce-1cf0ad85382a-kube-api-access-6bv7p\") pod \"kube-state-metrics-0\" (UID: \"7204b63f-239d-4f0e-96ce-1cf0ad85382a\") " pod="openstack/kube-state-metrics-0" Nov 26 07:12:41 crc kubenswrapper[4940]: I1126 07:12:41.178182 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bv7p\" (UniqueName: \"kubernetes.io/projected/7204b63f-239d-4f0e-96ce-1cf0ad85382a-kube-api-access-6bv7p\") pod \"kube-state-metrics-0\" (UID: \"7204b63f-239d-4f0e-96ce-1cf0ad85382a\") " pod="openstack/kube-state-metrics-0" Nov 26 07:12:41 crc kubenswrapper[4940]: I1126 07:12:41.196858 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bv7p\" (UniqueName: \"kubernetes.io/projected/7204b63f-239d-4f0e-96ce-1cf0ad85382a-kube-api-access-6bv7p\") pod \"kube-state-metrics-0\" (UID: \"7204b63f-239d-4f0e-96ce-1cf0ad85382a\") " pod="openstack/kube-state-metrics-0" Nov 26 07:12:41 crc kubenswrapper[4940]: I1126 07:12:41.265849 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.253297 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-78r7g"] Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.254800 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.256861 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.260433 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.260451 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-dln9t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.270916 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-78r7g"] Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.278157 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-k9l7t"] Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.279876 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.307452 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-k9l7t"] Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355278 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-ovn-controller-tls-certs\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355322 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-run\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355352 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzrdv\" (UniqueName: \"kubernetes.io/projected/5f26eaaa-63b0-491d-b664-56edff3be80c-kube-api-access-qzrdv\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355373 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355448 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ad552be-e28a-4873-a90e-867bd6efc437-scripts\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355464 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtx8b\" (UniqueName: \"kubernetes.io/projected/7ad552be-e28a-4873-a90e-867bd6efc437-kube-api-access-dtx8b\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355578 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-etc-ovs\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355600 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f26eaaa-63b0-491d-b664-56edff3be80c-scripts\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355625 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" 
(UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-log-ovn\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355660 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-log\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355774 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-combined-ca-bundle\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355821 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-lib\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.355896 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run-ovn\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457248 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzrdv\" (UniqueName: \"kubernetes.io/projected/5f26eaaa-63b0-491d-b664-56edff3be80c-kube-api-access-qzrdv\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457292 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457327 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ad552be-e28a-4873-a90e-867bd6efc437-scripts\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457342 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtx8b\" (UniqueName: \"kubernetes.io/projected/7ad552be-e28a-4873-a90e-867bd6efc437-kube-api-access-dtx8b\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457368 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-etc-ovs\") pod \"ovn-controller-ovs-k9l7t\" (UID: 
\"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457386 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f26eaaa-63b0-491d-b664-56edff3be80c-scripts\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457412 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-log-ovn\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457447 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-log\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457469 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-combined-ca-bundle\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457489 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-lib\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457510 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run-ovn\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457543 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-ovn-controller-tls-certs\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457557 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-run\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457925 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-run\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457944 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-run\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.457993 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-etc-ovs\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.458001 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run-ovn\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.458157 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-lib\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.458225 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-log-ovn\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.458275 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-log\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.459483 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ad552be-e28a-4873-a90e-867bd6efc437-scripts\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.459834 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f26eaaa-63b0-491d-b664-56edff3be80c-scripts\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.463959 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-ovn-controller-tls-certs\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.480533 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtx8b\" (UniqueName: \"kubernetes.io/projected/7ad552be-e28a-4873-a90e-867bd6efc437-kube-api-access-dtx8b\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.482838 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzrdv\" (UniqueName: \"kubernetes.io/projected/5f26eaaa-63b0-491d-b664-56edff3be80c-kube-api-access-qzrdv\") pod \"ovn-controller-ovs-k9l7t\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.483030 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-combined-ca-bundle\") pod \"ovn-controller-78r7g\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.575864 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-78r7g" Nov 26 07:12:45 crc kubenswrapper[4940]: I1126 07:12:45.613908 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.141969 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.143761 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.146100 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.146324 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.146561 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.146828 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.147155 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-vct5k" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.159643 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.272645 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-config\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.272717 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/99d95f08-663c-4443-9a16-459f02985879-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.272779 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc 
kubenswrapper[4940]: I1126 07:12:46.272826 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.272967 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.273082 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dzm4\" (UniqueName: \"kubernetes.io/projected/99d95f08-663c-4443-9a16-459f02985879-kube-api-access-9dzm4\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.273111 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.273197 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.375165 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.375237 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-config\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.375272 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/99d95f08-663c-4443-9a16-459f02985879-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.375314 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.375359 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.375400 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.375446 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dzm4\" (UniqueName: \"kubernetes.io/projected/99d95f08-663c-4443-9a16-459f02985879-kube-api-access-9dzm4\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.375468 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.376129 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.376185 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-config\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.376692 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/99d95f08-663c-4443-9a16-459f02985879-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.376954 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.379203 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.379230 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.379649 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.396980 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.398802 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dzm4\" (UniqueName: \"kubernetes.io/projected/99d95f08-663c-4443-9a16-459f02985879-kube-api-access-9dzm4\") pod \"ovsdbserver-nb-0\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:46 crc kubenswrapper[4940]: I1126 07:12:46.475636 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.289108 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.291138 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.294151 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.295273 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-grtff" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.295455 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.295592 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.299403 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.407817 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.407889 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.407919 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-config\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.407983 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7cdd\" (UniqueName: \"kubernetes.io/projected/69f4262a-7eb3-4091-b103-393b9ab3a720-kube-api-access-w7cdd\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.408159 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.408203 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.408244 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.408330 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510090 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510142 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510175 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-config\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510226 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7cdd\" (UniqueName: \"kubernetes.io/projected/69f4262a-7eb3-4091-b103-393b9ab3a720-kube-api-access-w7cdd\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510259 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510284 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510311 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510344 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510348 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.510787 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.511533 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-config\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.511543 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.518377 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.518959 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.530299 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-w7cdd\" (UniqueName: \"kubernetes.io/projected/69f4262a-7eb3-4091-b103-393b9ab3a720-kube-api-access-w7cdd\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.539351 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.542233 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:48 crc kubenswrapper[4940]: I1126 07:12:48.610615 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 07:12:51 crc kubenswrapper[4940]: I1126 07:12:51.024011 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.399628 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.399939 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8xsth,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bdd77c89-b8wg7_openstack(575186d6-7607-4836-b9fa-87767e5a7da1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.401208 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" podUID="575186d6-7607-4836-b9fa-87767e5a7da1" Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.445311 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.445900 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cc8ck,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6486446b9f-2lb2h_openstack(1d0a18f1-b7be-4221-be26-abfe4e69b3ed): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.448595 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" podUID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.471422 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.471764 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r9l7v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6584b49599-556wf_openstack(8d45d551-e818-4892-b721-b4e869f94589): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:12:51 crc kubenswrapper[4940]: E1126 07:12:51.472895 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6584b49599-556wf" podUID="8d45d551-e818-4892-b721-b4e869f94589" Nov 26 07:12:51 crc kubenswrapper[4940]: I1126 07:12:51.614530 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7204b63f-239d-4f0e-96ce-1cf0ad85382a","Type":"ContainerStarted","Data":"a1fa4cbc35e749451233b43051cf39d7fc9bd25da9d92292216f41723851d1a0"} Nov 26 07:12:51 crc kubenswrapper[4940]: I1126 07:12:51.840807 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 07:12:51 crc kubenswrapper[4940]: W1126 07:12:51.901744 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f53eb12_2c7d_4107_9d63_f0db8e983d90.slice/crio-035f9273b7b7ca13ed24a3f44a51aea64f6a3e8a5083b9795e0cb4bd1cc6766c WatchSource:0}: Error finding container 035f9273b7b7ca13ed24a3f44a51aea64f6a3e8a5083b9795e0cb4bd1cc6766c: Status 404 returned error can't find the container with id 035f9273b7b7ca13ed24a3f44a51aea64f6a3e8a5083b9795e0cb4bd1cc6766c Nov 26 07:12:51 crc kubenswrapper[4940]: I1126 07:12:51.988142 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 07:12:51 crc kubenswrapper[4940]: I1126 07:12:51.996785 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/openstack-galera-0"] Nov 26 07:12:52 crc kubenswrapper[4940]: W1126 07:12:52.001888 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69f67fa7_ea74_4966_b69c_ab547896057e.slice/crio-96daa4d06959246193e4e52966cb1077a7a8319911905a3cea480a0d594ab40e WatchSource:0}: Error finding container 96daa4d06959246193e4e52966cb1077a7a8319911905a3cea480a0d594ab40e: Status 404 returned error can't find the container with id 96daa4d06959246193e4e52966cb1077a7a8319911905a3cea480a0d594ab40e Nov 26 07:12:52 crc kubenswrapper[4940]: W1126 07:12:52.072522 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6c56309_82af_4734_a3d4_6c203fd5b23e.slice/crio-5271caf04fd67d53db774793b10659d6750e94bf921bf9bb568acb4d9f4590bc WatchSource:0}: Error finding container 5271caf04fd67d53db774793b10659d6750e94bf921bf9bb568acb4d9f4590bc: Status 404 returned error can't find the container with id 5271caf04fd67d53db774793b10659d6750e94bf921bf9bb568acb4d9f4590bc Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.376626 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-78r7g"] Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.471850 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.623938 4940 generic.go:334] "Generic (PLEG): container finished" podID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" containerID="cf513408a889df5d299f00f39da6a8a61133d3da2ad562c3d244a5a1639b6546" exitCode=0 Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.624337 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" event={"ID":"2bbaafab-23d1-4141-93cd-bdf1f4b3424e","Type":"ContainerDied","Data":"cf513408a889df5d299f00f39da6a8a61133d3da2ad562c3d244a5a1639b6546"} Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.625431 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"69f67fa7-ea74-4966-b69c-ab547896057e","Type":"ContainerStarted","Data":"96daa4d06959246193e4e52966cb1077a7a8319911905a3cea480a0d594ab40e"} Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.626630 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3f53eb12-2c7d-4107-9d63-f0db8e983d90","Type":"ContainerStarted","Data":"035f9273b7b7ca13ed24a3f44a51aea64f6a3e8a5083b9795e0cb4bd1cc6766c"} Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.627536 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a6c56309-82af-4734-a3d4-6c203fd5b23e","Type":"ContainerStarted","Data":"5271caf04fd67d53db774793b10659d6750e94bf921bf9bb568acb4d9f4590bc"} Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.628753 4940 generic.go:334] "Generic (PLEG): container finished" podID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" containerID="84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720" exitCode=0 Nov 26 07:12:52 crc kubenswrapper[4940]: I1126 07:12:52.628791 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" event={"ID":"1d0a18f1-b7be-4221-be26-abfe4e69b3ed","Type":"ContainerDied","Data":"84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720"} Nov 26 07:12:53 crc kubenswrapper[4940]: I1126 07:12:53.034101 4940 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-k9l7t"] Nov 26 07:12:53 crc kubenswrapper[4940]: I1126 07:12:53.277796 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:12:55 crc kubenswrapper[4940]: W1126 07:12:55.178316 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f26eaaa_63b0_491d_b664_56edff3be80c.slice/crio-7008a7f727d0ac277f063ee8b3bf7aff4869d564f571be3cdbf684b868433a99 WatchSource:0}: Error finding container 7008a7f727d0ac277f063ee8b3bf7aff4869d564f571be3cdbf684b868433a99: Status 404 returned error can't find the container with id 7008a7f727d0ac277f063ee8b3bf7aff4869d564f571be3cdbf684b868433a99 Nov 26 07:12:55 crc kubenswrapper[4940]: W1126 07:12:55.184422 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69f4262a_7eb3_4091_b103_393b9ab3a720.slice/crio-7ef5a044fb9280b9e442183bbdee0ebab9d8ce9e6b15a5b6173481be74a116a8 WatchSource:0}: Error finding container 7ef5a044fb9280b9e442183bbdee0ebab9d8ce9e6b15a5b6173481be74a116a8: Status 404 returned error can't find the container with id 7ef5a044fb9280b9e442183bbdee0ebab9d8ce9e6b15a5b6173481be74a116a8 Nov 26 07:12:55 crc kubenswrapper[4940]: W1126 07:12:55.195639 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ad552be_e28a_4873_a90e_867bd6efc437.slice/crio-8c6d4034a3f964bd13dd7c743ac51daf0afa6b9515a58240b18416a90549f7ea WatchSource:0}: Error finding container 8c6d4034a3f964bd13dd7c743ac51daf0afa6b9515a58240b18416a90549f7ea: Status 404 returned error can't find the container with id 8c6d4034a3f964bd13dd7c743ac51daf0afa6b9515a58240b18416a90549f7ea Nov 26 07:12:55 crc kubenswrapper[4940]: W1126 07:12:55.202760 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99d95f08_663c_4443_9a16_459f02985879.slice/crio-2cfe4a05145d2eca576f4252b72dc78b5a8874e64f01e501f560c894ffc01ebb WatchSource:0}: Error finding container 2cfe4a05145d2eca576f4252b72dc78b5a8874e64f01e501f560c894ffc01ebb: Status 404 returned error can't find the container with id 2cfe4a05145d2eca576f4252b72dc78b5a8874e64f01e501f560c894ffc01ebb Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.299096 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.305849 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.328712 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xsth\" (UniqueName: \"kubernetes.io/projected/575186d6-7607-4836-b9fa-87767e5a7da1-kube-api-access-8xsth\") pod \"575186d6-7607-4836-b9fa-87767e5a7da1\" (UID: \"575186d6-7607-4836-b9fa-87767e5a7da1\") " Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.328816 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/575186d6-7607-4836-b9fa-87767e5a7da1-config\") pod \"575186d6-7607-4836-b9fa-87767e5a7da1\" (UID: \"575186d6-7607-4836-b9fa-87767e5a7da1\") " Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.329317 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/575186d6-7607-4836-b9fa-87767e5a7da1-config" (OuterVolumeSpecName: "config") pod "575186d6-7607-4836-b9fa-87767e5a7da1" (UID: "575186d6-7607-4836-b9fa-87767e5a7da1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.334929 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/575186d6-7607-4836-b9fa-87767e5a7da1-kube-api-access-8xsth" (OuterVolumeSpecName: "kube-api-access-8xsth") pod "575186d6-7607-4836-b9fa-87767e5a7da1" (UID: "575186d6-7607-4836-b9fa-87767e5a7da1"). InnerVolumeSpecName "kube-api-access-8xsth". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.430343 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-config\") pod \"8d45d551-e818-4892-b721-b4e869f94589\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.430449 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9l7v\" (UniqueName: \"kubernetes.io/projected/8d45d551-e818-4892-b721-b4e869f94589-kube-api-access-r9l7v\") pod \"8d45d551-e818-4892-b721-b4e869f94589\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.430507 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-dns-svc\") pod \"8d45d551-e818-4892-b721-b4e869f94589\" (UID: \"8d45d551-e818-4892-b721-b4e869f94589\") " Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.430822 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-config" (OuterVolumeSpecName: "config") pod "8d45d551-e818-4892-b721-b4e869f94589" (UID: "8d45d551-e818-4892-b721-b4e869f94589"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.430914 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xsth\" (UniqueName: \"kubernetes.io/projected/575186d6-7607-4836-b9fa-87767e5a7da1-kube-api-access-8xsth\") on node \"crc\" DevicePath \"\"" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.430945 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/575186d6-7607-4836-b9fa-87767e5a7da1-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.431256 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8d45d551-e818-4892-b721-b4e869f94589" (UID: "8d45d551-e818-4892-b721-b4e869f94589"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.432790 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d45d551-e818-4892-b721-b4e869f94589-kube-api-access-r9l7v" (OuterVolumeSpecName: "kube-api-access-r9l7v") pod "8d45d551-e818-4892-b721-b4e869f94589" (UID: "8d45d551-e818-4892-b721-b4e869f94589"). InnerVolumeSpecName "kube-api-access-r9l7v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.532161 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.532192 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9l7v\" (UniqueName: \"kubernetes.io/projected/8d45d551-e818-4892-b721-b4e869f94589-kube-api-access-r9l7v\") on node \"crc\" DevicePath \"\"" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.532202 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d45d551-e818-4892-b721-b4e869f94589-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.654005 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k9l7t" event={"ID":"5f26eaaa-63b0-491d-b664-56edff3be80c","Type":"ContainerStarted","Data":"7008a7f727d0ac277f063ee8b3bf7aff4869d564f571be3cdbf684b868433a99"} Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.655297 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"69f4262a-7eb3-4091-b103-393b9ab3a720","Type":"ContainerStarted","Data":"7ef5a044fb9280b9e442183bbdee0ebab9d8ce9e6b15a5b6173481be74a116a8"} Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.656670 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-556wf" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.656686 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-556wf" event={"ID":"8d45d551-e818-4892-b721-b4e869f94589","Type":"ContainerDied","Data":"518a766b4a334619ccbe9f590521d02db000869b4969d38dfe8010b581a29dab"} Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.657908 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g" event={"ID":"7ad552be-e28a-4873-a90e-867bd6efc437","Type":"ContainerStarted","Data":"8c6d4034a3f964bd13dd7c743ac51daf0afa6b9515a58240b18416a90549f7ea"} Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.660750 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" event={"ID":"575186d6-7607-4836-b9fa-87767e5a7da1","Type":"ContainerDied","Data":"76994cf10a6b79789ce44e8f225f77185584895891bd09a1847ae9471be401bb"} Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.660771 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-b8wg7" Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.664951 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"11a17576-9a94-4e2d-8915-9d838de09f0b","Type":"ContainerStarted","Data":"88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493"} Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.666651 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"99d95f08-663c-4443-9a16-459f02985879","Type":"ContainerStarted","Data":"2cfe4a05145d2eca576f4252b72dc78b5a8874e64f01e501f560c894ffc01ebb"} Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.667985 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"69972749-03ff-48e9-b031-99c33ce86e96","Type":"ContainerStarted","Data":"8e9e90c1c11a39ed3cb269a3561acbf16d889c8ed343e1427b9f27c9d40de9c6"} Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.754350 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-556wf"] Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.770866 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-556wf"] Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.791379 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-b8wg7"] Nov 26 07:12:55 crc kubenswrapper[4940]: I1126 07:12:55.796990 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-b8wg7"] Nov 26 07:12:57 crc kubenswrapper[4940]: I1126 07:12:57.176776 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="575186d6-7607-4836-b9fa-87767e5a7da1" path="/var/lib/kubelet/pods/575186d6-7607-4836-b9fa-87767e5a7da1/volumes" Nov 26 07:12:57 crc kubenswrapper[4940]: I1126 07:12:57.177692 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d45d551-e818-4892-b721-b4e869f94589" path="/var/lib/kubelet/pods/8d45d551-e818-4892-b721-b4e869f94589/volumes" Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.723315 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"69f4262a-7eb3-4091-b103-393b9ab3a720","Type":"ContainerStarted","Data":"94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f"} Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.725130 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a6c56309-82af-4734-a3d4-6c203fd5b23e","Type":"ContainerStarted","Data":"3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc"} Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.726734 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g" event={"ID":"7ad552be-e28a-4873-a90e-867bd6efc437","Type":"ContainerStarted","Data":"e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3"} Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.726933 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-78r7g" Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.728743 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" event={"ID":"2bbaafab-23d1-4141-93cd-bdf1f4b3424e","Type":"ContainerStarted","Data":"009769f2da94464892748e4bbf10adf833eea134aa88e783fd67b4f9f58012a8"} Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.728888 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.730587 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"69f67fa7-ea74-4966-b69c-ab547896057e","Type":"ContainerStarted","Data":"f62286900f59e04e8b5935f72d22351db72e1e90c13dfd3c197b2183c1ddd5c0"} Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.730709 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.732107 4940 generic.go:334] "Generic (PLEG): container finished" podID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerID="5f4d42347e6909bca9c12b0af8c30a53b6a069559b2ae18120a71d8c27081a74" exitCode=0 Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.732135 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k9l7t" event={"ID":"5f26eaaa-63b0-491d-b664-56edff3be80c","Type":"ContainerDied","Data":"5f4d42347e6909bca9c12b0af8c30a53b6a069559b2ae18120a71d8c27081a74"} Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.734387 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3f53eb12-2c7d-4107-9d63-f0db8e983d90","Type":"ContainerStarted","Data":"4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b"} Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.739360 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" event={"ID":"1d0a18f1-b7be-4221-be26-abfe4e69b3ed","Type":"ContainerStarted","Data":"6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269"} Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.739603 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.745116 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"99d95f08-663c-4443-9a16-459f02985879","Type":"ContainerStarted","Data":"a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0"} 
Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.754953 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7204b63f-239d-4f0e-96ce-1cf0ad85382a","Type":"ContainerStarted","Data":"87ece76ba7dc37fedad0f4dfdecb3831b0379e05c8f03d8362432bd1dd10fe9b"}
Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.755477 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.777654 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" podStartSLOduration=-9223372009.077145 podStartE2EDuration="27.777631686s" podCreationTimestamp="2025-11-26 07:12:34 +0000 UTC" firstStartedPulling="2025-11-26 07:12:35.38083771 +0000 UTC m=+1056.900979329" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:01.77650225 +0000 UTC m=+1083.296643889" watchObservedRunningTime="2025-11-26 07:13:01.777631686 +0000 UTC m=+1083.297773295"
Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.841333 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" podStartSLOduration=11.481128256 podStartE2EDuration="27.841316206s" podCreationTimestamp="2025-11-26 07:12:34 +0000 UTC" firstStartedPulling="2025-11-26 07:12:35.137688449 +0000 UTC m=+1056.657830068" lastFinishedPulling="2025-11-26 07:12:51.497876409 +0000 UTC m=+1073.018018018" observedRunningTime="2025-11-26 07:13:01.839910452 +0000 UTC m=+1083.360052071" watchObservedRunningTime="2025-11-26 07:13:01.841316206 +0000 UTC m=+1083.361457825"
Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.866558 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-78r7g" podStartSLOduration=11.1221284 podStartE2EDuration="16.866539346s" podCreationTimestamp="2025-11-26 07:12:45 +0000 UTC" firstStartedPulling="2025-11-26 07:12:55.197642489 +0000 UTC m=+1076.717784118" lastFinishedPulling="2025-11-26 07:13:00.942053435 +0000 UTC m=+1082.462195064" observedRunningTime="2025-11-26 07:13:01.857317293 +0000 UTC m=+1083.377458922" watchObservedRunningTime="2025-11-26 07:13:01.866539346 +0000 UTC m=+1083.386680965"
Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.885951 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=15.024153397 podStartE2EDuration="23.885930111s" podCreationTimestamp="2025-11-26 07:12:38 +0000 UTC" firstStartedPulling="2025-11-26 07:12:52.004608851 +0000 UTC m=+1073.524750460" lastFinishedPulling="2025-11-26 07:13:00.866385555 +0000 UTC m=+1082.386527174" observedRunningTime="2025-11-26 07:13:01.879464827 +0000 UTC m=+1083.399606446" watchObservedRunningTime="2025-11-26 07:13:01.885930111 +0000 UTC m=+1083.406071730"
Nov 26 07:13:01 crc kubenswrapper[4940]: I1126 07:13:01.896677 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=12.433786071 podStartE2EDuration="21.896658901s" podCreationTimestamp="2025-11-26 07:12:40 +0000 UTC" firstStartedPulling="2025-11-26 07:12:51.413749711 +0000 UTC m=+1072.933891370" lastFinishedPulling="2025-11-26 07:13:00.876622581 +0000 UTC m=+1082.396764200" observedRunningTime="2025-11-26 07:13:01.895505395 +0000 UTC m=+1083.415647014" watchObservedRunningTime="2025-11-26 07:13:01.896658901 +0000 UTC m=+1083.416800520"
Nov 26 07:13:02 crc kubenswrapper[4940]: I1126 07:13:02.765380 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k9l7t" event={"ID":"5f26eaaa-63b0-491d-b664-56edff3be80c","Type":"ContainerStarted","Data":"b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf"}
Nov 26 07:13:02 crc kubenswrapper[4940]: I1126 07:13:02.765868 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k9l7t" event={"ID":"5f26eaaa-63b0-491d-b664-56edff3be80c","Type":"ContainerStarted","Data":"3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee"}
Nov 26 07:13:02 crc kubenswrapper[4940]: I1126 07:13:02.790306 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-k9l7t" podStartSLOduration=12.035628371 podStartE2EDuration="17.790282953s" podCreationTimestamp="2025-11-26 07:12:45 +0000 UTC" firstStartedPulling="2025-11-26 07:12:55.181165956 +0000 UTC m=+1076.701307575" lastFinishedPulling="2025-11-26 07:13:00.935820528 +0000 UTC m=+1082.455962157" observedRunningTime="2025-11-26 07:13:02.784498749 +0000 UTC m=+1084.304640368" watchObservedRunningTime="2025-11-26 07:13:02.790282953 +0000 UTC m=+1084.310424572"
Nov 26 07:13:03 crc kubenswrapper[4940]: I1126 07:13:03.771951 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-k9l7t"
Nov 26 07:13:03 crc kubenswrapper[4940]: I1126 07:13:03.772349 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-k9l7t"
Nov 26 07:13:04 crc kubenswrapper[4940]: I1126 07:13:04.781561 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"99d95f08-663c-4443-9a16-459f02985879","Type":"ContainerStarted","Data":"a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13"}
Nov 26 07:13:04 crc kubenswrapper[4940]: I1126 07:13:04.783227 4940 generic.go:334] "Generic (PLEG): container finished" podID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" containerID="4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b" exitCode=0
Nov 26 07:13:04 crc kubenswrapper[4940]: I1126 07:13:04.783322 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3f53eb12-2c7d-4107-9d63-f0db8e983d90","Type":"ContainerDied","Data":"4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b"}
Nov 26 07:13:04 crc kubenswrapper[4940]: I1126 07:13:04.785937 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"69f4262a-7eb3-4091-b103-393b9ab3a720","Type":"ContainerStarted","Data":"fc517e7c3e97111fb38bbf8de532d3a15807567d75f6bc5c029091d6af85b9a5"}
Nov 26 07:13:04 crc kubenswrapper[4940]: I1126 07:13:04.789459 4940 generic.go:334] "Generic (PLEG): container finished" podID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerID="3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc" exitCode=0
Nov 26 07:13:04 crc kubenswrapper[4940]: I1126 07:13:04.790156 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a6c56309-82af-4734-a3d4-6c203fd5b23e","Type":"ContainerDied","Data":"3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc"}
Nov 26 07:13:04 crc kubenswrapper[4940]: I1126 07:13:04.800059 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=10.972083961 podStartE2EDuration="19.800017193s" podCreationTimestamp="2025-11-26 07:12:45 +0000 UTC" firstStartedPulling="2025-11-26 07:12:55.204683792 +0000 UTC m=+1076.724825401" lastFinishedPulling="2025-11-26 07:13:04.032617014 +0000 UTC m=+1085.552758633" observedRunningTime="2025-11-26 07:13:04.797056139 +0000 UTC m=+1086.317197768" watchObservedRunningTime="2025-11-26 07:13:04.800017193 +0000 UTC m=+1086.320158812"
Nov 26 07:13:04 crc kubenswrapper[4940]: I1126 07:13:04.841853 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=8.986146485999999 podStartE2EDuration="17.841832619s" podCreationTimestamp="2025-11-26 07:12:47 +0000 UTC" firstStartedPulling="2025-11-26 07:12:55.186987411 +0000 UTC m=+1076.707129050" lastFinishedPulling="2025-11-26 07:13:04.042673564 +0000 UTC m=+1085.562815183" observedRunningTime="2025-11-26 07:13:04.839598999 +0000 UTC m=+1086.359740638" watchObservedRunningTime="2025-11-26 07:13:04.841832619 +0000 UTC m=+1086.361974238"
Nov 26 07:13:05 crc kubenswrapper[4940]: I1126 07:13:05.798402 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3f53eb12-2c7d-4107-9d63-f0db8e983d90","Type":"ContainerStarted","Data":"6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab"}
Nov 26 07:13:05 crc kubenswrapper[4940]: I1126 07:13:05.800421 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a6c56309-82af-4734-a3d4-6c203fd5b23e","Type":"ContainerStarted","Data":"e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47"}
Nov 26 07:13:05 crc kubenswrapper[4940]: I1126 07:13:05.828032 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=19.863008368 podStartE2EDuration="28.828008707s" podCreationTimestamp="2025-11-26 07:12:37 +0000 UTC" firstStartedPulling="2025-11-26 07:12:51.908235504 +0000 UTC m=+1073.428377123" lastFinishedPulling="2025-11-26 07:13:00.873235843 +0000 UTC m=+1082.393377462" observedRunningTime="2025-11-26 07:13:05.827726418 +0000 UTC m=+1087.347868057" watchObservedRunningTime="2025-11-26 07:13:05.828008707 +0000 UTC m=+1087.348150356"
Nov 26 07:13:05 crc kubenswrapper[4940]: I1126 07:13:05.849323 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=21.057715244 podStartE2EDuration="29.849300883s" podCreationTimestamp="2025-11-26 07:12:36 +0000 UTC" firstStartedPulling="2025-11-26 07:12:52.074786246 +0000 UTC m=+1073.594927865" lastFinishedPulling="2025-11-26 07:13:00.866371885 +0000 UTC m=+1082.386513504" observedRunningTime="2025-11-26 07:13:05.843588601 +0000 UTC m=+1087.363730220" watchObservedRunningTime="2025-11-26 07:13:05.849300883 +0000 UTC m=+1087.369442512"
Nov 26 07:13:06 crc kubenswrapper[4940]: I1126 07:13:06.476099 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 26 07:13:06 crc kubenswrapper[4940]: I1126 07:13:06.611572 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Nov 26 07:13:06 crc kubenswrapper[4940]: I1126 07:13:06.650476 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Nov 26 07:13:06 crc kubenswrapper[4940]: I1126 07:13:06.809022 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Nov 26 07:13:06 crc kubenswrapper[4940]: I1126 07:13:06.870079 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.113878 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-2lb2h"]
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.114433 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" podUID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" containerName="dnsmasq-dns" containerID="cri-o://6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269" gracePeriod=10
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.115178 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.177297 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"]
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.178451 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.180453 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.180644 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-5fj2b"]
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.181612 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.186613 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.197012 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-5fj2b"]
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.220003 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"]
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332531 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nl97d\" (UniqueName: \"kubernetes.io/projected/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-kube-api-access-nl97d\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332643 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3baa7ac-9221-47e0-afb0-25715f0e2491-config\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332677 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-combined-ca-bundle\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332707 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovs-rundir\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332729 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovn-rundir\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332792 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-ovsdbserver-sb\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332833 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-dns-svc\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332860 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332888 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p2jf\" (UniqueName: \"kubernetes.io/projected/e3baa7ac-9221-47e0-afb0-25715f0e2491-kube-api-access-9p2jf\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.332926 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-config\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434165 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-ovsdbserver-sb\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434223 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-dns-svc\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434260 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434283 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p2jf\" (UniqueName: \"kubernetes.io/projected/e3baa7ac-9221-47e0-afb0-25715f0e2491-kube-api-access-9p2jf\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434301 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-config\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434327 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nl97d\" (UniqueName: \"kubernetes.io/projected/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-kube-api-access-nl97d\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434376 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3baa7ac-9221-47e0-afb0-25715f0e2491-config\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434397 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-combined-ca-bundle\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovs-rundir\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.434429 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovn-rundir\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.435513 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-ovsdbserver-sb\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.436077 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-config\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.436195 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovs-rundir\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.436688 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-dns-svc\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.440209 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovn-rundir\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.445186 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3baa7ac-9221-47e0-afb0-25715f0e2491-config\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.445720 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-combined-ca-bundle\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.448593 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.459278 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d8746976c-vkf6q"]
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.460056 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" podUID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" containerName="dnsmasq-dns" containerID="cri-o://009769f2da94464892748e4bbf10adf833eea134aa88e783fd67b4f9f58012a8" gracePeriod=10
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.479031 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.484392 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.487006 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c476d78c5-4qjbj"]
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.489879 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.494877 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nl97d\" (UniqueName: \"kubernetes.io/projected/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-kube-api-access-nl97d\") pod \"dnsmasq-dns-65c9b8d4f7-lzxd9\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.499766 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.500065 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p2jf\" (UniqueName: \"kubernetes.io/projected/e3baa7ac-9221-47e0-afb0-25715f0e2491-kube-api-access-9p2jf\") pod \"ovn-controller-metrics-5fj2b\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.509413 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.524011 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-5fj2b"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.526226 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c476d78c5-4qjbj"]
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.559529 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.633629 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.633840 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.638250 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxdvt\" (UniqueName: \"kubernetes.io/projected/aac5dfa7-6274-4567-afad-2618de0eb3a0-kube-api-access-pxdvt\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.638361 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-dns-svc\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.638452 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-config\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.638495 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.638564 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.656669 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.740276 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cc8ck\" (UniqueName: \"kubernetes.io/projected/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-kube-api-access-cc8ck\") pod \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") "
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.740748 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-config\") pod \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") "
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.741460 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-dns-svc\") pod \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\" (UID: \"1d0a18f1-b7be-4221-be26-abfe4e69b3ed\") "
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.741664 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.741740 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxdvt\" (UniqueName: \"kubernetes.io/projected/aac5dfa7-6274-4567-afad-2618de0eb3a0-kube-api-access-pxdvt\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.741848 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-dns-svc\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.741928 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-config\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.741962 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.742888 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.743179 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-dns-svc\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.743259 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.745804 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-config\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.748261 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-kube-api-access-cc8ck" (OuterVolumeSpecName: "kube-api-access-cc8ck") pod "1d0a18f1-b7be-4221-be26-abfe4e69b3ed" (UID: "1d0a18f1-b7be-4221-be26-abfe4e69b3ed"). InnerVolumeSpecName "kube-api-access-cc8ck". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.763719 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxdvt\" (UniqueName: \"kubernetes.io/projected/aac5dfa7-6274-4567-afad-2618de0eb3a0-kube-api-access-pxdvt\") pod \"dnsmasq-dns-5c476d78c5-4qjbj\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj"
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.781657 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-config" (OuterVolumeSpecName: "config") pod "1d0a18f1-b7be-4221-be26-abfe4e69b3ed" (UID: "1d0a18f1-b7be-4221-be26-abfe4e69b3ed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.785773 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1d0a18f1-b7be-4221-be26-abfe4e69b3ed" (UID: "1d0a18f1-b7be-4221-be26-abfe4e69b3ed"). InnerVolumeSpecName "dns-svc".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.821033 4940 generic.go:334] "Generic (PLEG): container finished" podID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" containerID="6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269" exitCode=0 Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.821131 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" event={"ID":"1d0a18f1-b7be-4221-be26-abfe4e69b3ed","Type":"ContainerDied","Data":"6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269"} Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.821167 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" event={"ID":"1d0a18f1-b7be-4221-be26-abfe4e69b3ed","Type":"ContainerDied","Data":"d66df7d22de3aaac8b32c3228fb1f302b170e8091f692454e7bfb71ba1607fb8"} Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.821187 4940 scope.go:117] "RemoveContainer" containerID="6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.821320 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-2lb2h" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.828381 4940 generic.go:334] "Generic (PLEG): container finished" podID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" containerID="009769f2da94464892748e4bbf10adf833eea134aa88e783fd67b4f9f58012a8" exitCode=0 Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.828575 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" event={"ID":"2bbaafab-23d1-4141-93cd-bdf1f4b3424e","Type":"ContainerDied","Data":"009769f2da94464892748e4bbf10adf833eea134aa88e783fd67b4f9f58012a8"} Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.844095 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.844132 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cc8ck\" (UniqueName: \"kubernetes.io/projected/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-kube-api-access-cc8ck\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.844145 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d0a18f1-b7be-4221-be26-abfe4e69b3ed-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.858796 4940 scope.go:117] "RemoveContainer" containerID="84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.859882 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-2lb2h"] Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.867987 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-2lb2h"] Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.874006 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.904507 4940 scope.go:117] "RemoveContainer" containerID="6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269" Nov 26 07:13:07 crc 
kubenswrapper[4940]: I1126 07:13:07.905729 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" Nov 26 07:13:07 crc kubenswrapper[4940]: E1126 07:13:07.906714 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269\": container with ID starting with 6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269 not found: ID does not exist" containerID="6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.906778 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269"} err="failed to get container status \"6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269\": rpc error: code = NotFound desc = could not find container \"6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269\": container with ID starting with 6aeaf234a0627f30e959dc41db2a2968ee2c00ff20193fda96fe6c92d0b75269 not found: ID does not exist" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.906831 4940 scope.go:117] "RemoveContainer" containerID="84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720" Nov 26 07:13:07 crc kubenswrapper[4940]: E1126 07:13:07.909975 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720\": container with ID starting with 84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720 not found: ID does not exist" containerID="84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720" Nov 26 07:13:07 crc kubenswrapper[4940]: I1126 07:13:07.910644 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720"} err="failed to get container status \"84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720\": rpc error: code = NotFound desc = could not find container \"84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720\": container with ID starting with 84ce17b79823e7810f40f5ecb9f0b967d60efa7fb5fb778467757b13d4f30720 not found: ID does not exist" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.015492 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:13:08 crc kubenswrapper[4940]: E1126 07:13:08.016337 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" containerName="init" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.016355 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" containerName="init" Nov 26 07:13:08 crc kubenswrapper[4940]: E1126 07:13:08.016392 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" containerName="dnsmasq-dns" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.016402 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" containerName="dnsmasq-dns" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.023576 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" 
containerName="dnsmasq-dns" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.030280 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.040207 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-jvx2f" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.040756 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.048576 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.049012 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.055506 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.123335 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"] Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.131501 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-5fj2b"] Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.156858 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.157216 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmtc9\" (UniqueName: \"kubernetes.io/projected/cda3e3c5-7a68-4269-8c15-b463b9263805-kube-api-access-vmtc9\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.157362 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.157485 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-scripts\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.157604 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.157754 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: 
\"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.157868 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-config\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.259121 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.259219 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.259255 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-config\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.259296 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.259336 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmtc9\" (UniqueName: \"kubernetes.io/projected/cda3e3c5-7a68-4269-8c15-b463b9263805-kube-api-access-vmtc9\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.259390 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-scripts\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.259415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.259934 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.260445 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-config\") pod \"ovn-northd-0\" (UID: 
\"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.260577 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-scripts\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.265792 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.265794 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.276636 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.294333 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmtc9\" (UniqueName: \"kubernetes.io/projected/cda3e3c5-7a68-4269-8c15-b463b9263805-kube-api-access-vmtc9\") pod \"ovn-northd-0\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.351749 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c476d78c5-4qjbj"] Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.405320 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.557563 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.667174 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-dns-svc\") pod \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.667833 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-config\") pod \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.667887 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9njx8\" (UniqueName: \"kubernetes.io/projected/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-kube-api-access-9njx8\") pod \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\" (UID: \"2bbaafab-23d1-4141-93cd-bdf1f4b3424e\") " Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.671751 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-kube-api-access-9njx8" (OuterVolumeSpecName: "kube-api-access-9njx8") pod "2bbaafab-23d1-4141-93cd-bdf1f4b3424e" (UID: "2bbaafab-23d1-4141-93cd-bdf1f4b3424e"). InnerVolumeSpecName "kube-api-access-9njx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.722216 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-config" (OuterVolumeSpecName: "config") pod "2bbaafab-23d1-4141-93cd-bdf1f4b3424e" (UID: "2bbaafab-23d1-4141-93cd-bdf1f4b3424e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.724688 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2bbaafab-23d1-4141-93cd-bdf1f4b3424e" (UID: "2bbaafab-23d1-4141-93cd-bdf1f4b3424e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.769437 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.769477 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9njx8\" (UniqueName: \"kubernetes.io/projected/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-kube-api-access-9njx8\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.769490 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bbaafab-23d1-4141-93cd-bdf1f4b3424e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.839209 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-5fj2b" event={"ID":"e3baa7ac-9221-47e0-afb0-25715f0e2491","Type":"ContainerStarted","Data":"dd7562cc322c44e51d88e100358bb3ef6a12495c3a5c5238e4a92c094bcd8272"} Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.839248 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-5fj2b" event={"ID":"e3baa7ac-9221-47e0-afb0-25715f0e2491","Type":"ContainerStarted","Data":"6e422778bc134a6582edbf275bdb7d74591e12bd3473d45fd027375273493b1b"} Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.843915 4940 generic.go:334] "Generic (PLEG): container finished" podID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" containerID="3d6b083008cb90afe12933358b3febdfa89deef3f98dee35d88fde72ce0df2c4" exitCode=0 Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.844006 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" event={"ID":"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5","Type":"ContainerDied","Data":"3d6b083008cb90afe12933358b3febdfa89deef3f98dee35d88fde72ce0df2c4"} Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.844360 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" event={"ID":"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5","Type":"ContainerStarted","Data":"b6fb3a03307797491f06b578c3bfe79571ab62f6e98586f59ca06cd6f909d9cb"} Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.845735 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" event={"ID":"2bbaafab-23d1-4141-93cd-bdf1f4b3424e","Type":"ContainerDied","Data":"fe29d5a2b8f0b8f88259bbaa75be926931f1573746526c71461211e6b68a850d"} Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.845742 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d8746976c-vkf6q" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.845779 4940 scope.go:117] "RemoveContainer" containerID="009769f2da94464892748e4bbf10adf833eea134aa88e783fd67b4f9f58012a8" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.854718 4940 generic.go:334] "Generic (PLEG): container finished" podID="aac5dfa7-6274-4567-afad-2618de0eb3a0" containerID="2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c" exitCode=0 Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.855852 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" event={"ID":"aac5dfa7-6274-4567-afad-2618de0eb3a0","Type":"ContainerDied","Data":"2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c"} Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.855911 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" event={"ID":"aac5dfa7-6274-4567-afad-2618de0eb3a0","Type":"ContainerStarted","Data":"71a04b495b6f1f78eed58c7a0ef5b77160e97ec089770f06099ce30887ee4eb9"} Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.860297 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-5fj2b" podStartSLOduration=1.860273157 podStartE2EDuration="1.860273157s" podCreationTimestamp="2025-11-26 07:13:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:08.853170212 +0000 UTC m=+1090.373311841" watchObservedRunningTime="2025-11-26 07:13:08.860273157 +0000 UTC m=+1090.380414766" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.921262 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d8746976c-vkf6q"] Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.923216 4940 scope.go:117] "RemoveContainer" containerID="cf513408a889df5d299f00f39da6a8a61133d3da2ad562c3d244a5a1639b6546" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.947754 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d8746976c-vkf6q"] Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.969662 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.986568 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 26 07:13:08 crc kubenswrapper[4940]: I1126 07:13:08.986838 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.175067 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d0a18f1-b7be-4221-be26-abfe4e69b3ed" path="/var/lib/kubelet/pods/1d0a18f1-b7be-4221-be26-abfe4e69b3ed/volumes" Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.175685 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" path="/var/lib/kubelet/pods/2bbaafab-23d1-4141-93cd-bdf1f4b3424e/volumes" Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.305946 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.736166 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 26 
07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.807333 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.864402 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cda3e3c5-7a68-4269-8c15-b463b9263805","Type":"ContainerStarted","Data":"b2080648fccfaaedf3f9719008cb2addb4595068338c612c22c45e803dcb1324"} Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.868469 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" event={"ID":"aac5dfa7-6274-4567-afad-2618de0eb3a0","Type":"ContainerStarted","Data":"0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220"} Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.869395 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.870710 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" event={"ID":"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5","Type":"ContainerStarted","Data":"1631d07fcde213652d1c9ce10c41093a207924afd6347945441595061386bf12"} Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.871076 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.893969 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" podStartSLOduration=2.893951151 podStartE2EDuration="2.893951151s" podCreationTimestamp="2025-11-26 07:13:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:09.892530305 +0000 UTC m=+1091.412671934" watchObservedRunningTime="2025-11-26 07:13:09.893951151 +0000 UTC m=+1091.414092770" Nov 26 07:13:09 crc kubenswrapper[4940]: I1126 07:13:09.912924 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" podStartSLOduration=2.912905062 podStartE2EDuration="2.912905062s" podCreationTimestamp="2025-11-26 07:13:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:09.909529614 +0000 UTC m=+1091.429671233" watchObservedRunningTime="2025-11-26 07:13:09.912905062 +0000 UTC m=+1091.433046701" Nov 26 07:13:10 crc kubenswrapper[4940]: I1126 07:13:10.890329 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cda3e3c5-7a68-4269-8c15-b463b9263805","Type":"ContainerStarted","Data":"b0bf7e3ce23d8aabef777f75a95394afce2f910c3c79d9220de648b9329b628f"} Nov 26 07:13:10 crc kubenswrapper[4940]: I1126 07:13:10.890997 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 26 07:13:10 crc kubenswrapper[4940]: I1126 07:13:10.891016 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cda3e3c5-7a68-4269-8c15-b463b9263805","Type":"ContainerStarted","Data":"12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425"} Nov 26 07:13:10 crc kubenswrapper[4940]: I1126 07:13:10.917271 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.0584567480000002 
podStartE2EDuration="3.917233644s" podCreationTimestamp="2025-11-26 07:13:07 +0000 UTC" firstStartedPulling="2025-11-26 07:13:09.006157354 +0000 UTC m=+1090.526298973" lastFinishedPulling="2025-11-26 07:13:09.86493425 +0000 UTC m=+1091.385075869" observedRunningTime="2025-11-26 07:13:10.912973109 +0000 UTC m=+1092.433114758" watchObservedRunningTime="2025-11-26 07:13:10.917233644 +0000 UTC m=+1092.437375263" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.281194 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.290110 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c476d78c5-4qjbj"] Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.330567 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9fdb784c-j22q2"] Nov 26 07:13:11 crc kubenswrapper[4940]: E1126 07:13:11.330902 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" containerName="init" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.330918 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" containerName="init" Nov 26 07:13:11 crc kubenswrapper[4940]: E1126 07:13:11.330939 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" containerName="dnsmasq-dns" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.330948 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" containerName="dnsmasq-dns" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.331113 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bbaafab-23d1-4141-93cd-bdf1f4b3424e" containerName="dnsmasq-dns" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.331876 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.352451 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9fdb784c-j22q2"] Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.418391 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djmbv\" (UniqueName: \"kubernetes.io/projected/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-kube-api-access-djmbv\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.418473 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.418516 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-config\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.418557 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.418576 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-dns-svc\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.520547 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.520603 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-dns-svc\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.520658 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djmbv\" (UniqueName: \"kubernetes.io/projected/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-kube-api-access-djmbv\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.520715 4940 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.520755 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-config\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.521671 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.521681 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-config\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.521811 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-dns-svc\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.521821 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.541600 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djmbv\" (UniqueName: \"kubernetes.io/projected/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-kube-api-access-djmbv\") pod \"dnsmasq-dns-5c9fdb784c-j22q2\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:11 crc kubenswrapper[4940]: I1126 07:13:11.652669 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.080491 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9fdb784c-j22q2"] Nov 26 07:13:12 crc kubenswrapper[4940]: W1126 07:13:12.088197 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod878d8430_ecb8_4ed4_b6bd_3cd5681e5cea.slice/crio-8cb76cf9fa411ce2a5b3ca045b62bba519f1af37db464368466d6b965aba096b WatchSource:0}: Error finding container 8cb76cf9fa411ce2a5b3ca045b62bba519f1af37db464368466d6b965aba096b: Status 404 returned error can't find the container with id 8cb76cf9fa411ce2a5b3ca045b62bba519f1af37db464368466d6b965aba096b Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.425725 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.433180 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.434999 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.435379 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.436370 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.437011 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-frvxx" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.444409 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.536073 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.536134 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-cache\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.536160 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-lock\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.536186 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ttzj\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-kube-api-access-4ttzj\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.536251 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.637507 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ttzj\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-kube-api-access-4ttzj\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.637578 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.637684 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.637713 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-cache\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.637733 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-lock\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: E1126 07:13:12.637951 4940 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:13:12 crc kubenswrapper[4940]: E1126 07:13:12.637976 4940 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:13:12 crc kubenswrapper[4940]: E1126 07:13:12.638021 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift podName:1ae63b19-f186-430b-87f0-d058d2efa83c nodeName:}" failed. No retries permitted until 2025-11-26 07:13:13.13800342 +0000 UTC m=+1094.658145039 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift") pod "swift-storage-0" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c") : configmap "swift-ring-files" not found Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.638268 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-lock\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.638490 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-cache\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.638541 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.661817 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ttzj\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-kube-api-access-4ttzj\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.671349 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.906660 4940 generic.go:334] "Generic (PLEG): container finished" podID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" containerID="205e7fae91b54fb19fd64238f1d11edae2df9cf797148fffd87926e8138dfff6" exitCode=0 Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.906735 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" event={"ID":"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea","Type":"ContainerDied","Data":"205e7fae91b54fb19fd64238f1d11edae2df9cf797148fffd87926e8138dfff6"} Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.906786 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" event={"ID":"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea","Type":"ContainerStarted","Data":"8cb76cf9fa411ce2a5b3ca045b62bba519f1af37db464368466d6b965aba096b"} Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.906955 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" podUID="aac5dfa7-6274-4567-afad-2618de0eb3a0" containerName="dnsmasq-dns" containerID="cri-o://0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220" gracePeriod=10 Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.974777 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-f6s5k"] Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.976239 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.979884 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.980052 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 07:13:12 crc kubenswrapper[4940]: I1126 07:13:12.980174 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.002113 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-f6s5k"] Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.044929 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-combined-ca-bundle\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.045394 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-dispersionconf\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.045483 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-swiftconf\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.045533 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-scripts\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.045574 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b6691fc-2e13-47a2-86ff-cb5350301696-etc-swift\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.045605 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmt77\" (UniqueName: \"kubernetes.io/projected/9b6691fc-2e13-47a2-86ff-cb5350301696-kube-api-access-jmt77\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.045678 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-ring-data-devices\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 
07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.068307 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.149188 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-dispersionconf\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.149335 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.149407 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-swiftconf\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.149487 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-scripts\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.149520 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b6691fc-2e13-47a2-86ff-cb5350301696-etc-swift\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.149574 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmt77\" (UniqueName: \"kubernetes.io/projected/9b6691fc-2e13-47a2-86ff-cb5350301696-kube-api-access-jmt77\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: E1126 07:13:13.149632 4940 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.149653 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-ring-data-devices\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: E1126 07:13:13.149663 4940 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:13:13 crc kubenswrapper[4940]: E1126 07:13:13.149723 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift podName:1ae63b19-f186-430b-87f0-d058d2efa83c nodeName:}" failed. 
No retries permitted until 2025-11-26 07:13:14.149701118 +0000 UTC m=+1095.669842777 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift") pod "swift-storage-0" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c") : configmap "swift-ring-files" not found Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.149828 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-combined-ca-bundle\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.150538 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-ring-data-devices\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.151731 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b6691fc-2e13-47a2-86ff-cb5350301696-etc-swift\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.152323 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-scripts\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.154862 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-dispersionconf\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.156437 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.156594 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-swiftconf\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.156772 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-combined-ca-bundle\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.167297 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmt77\" (UniqueName: \"kubernetes.io/projected/9b6691fc-2e13-47a2-86ff-cb5350301696-kube-api-access-jmt77\") pod \"swift-ring-rebalance-f6s5k\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " pod="openstack/swift-ring-rebalance-f6s5k" 
Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.312639 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.353594 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-dns-svc\") pod \"aac5dfa7-6274-4567-afad-2618de0eb3a0\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.353691 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-config\") pod \"aac5dfa7-6274-4567-afad-2618de0eb3a0\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.353812 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-sb\") pod \"aac5dfa7-6274-4567-afad-2618de0eb3a0\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.353865 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxdvt\" (UniqueName: \"kubernetes.io/projected/aac5dfa7-6274-4567-afad-2618de0eb3a0-kube-api-access-pxdvt\") pod \"aac5dfa7-6274-4567-afad-2618de0eb3a0\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.353956 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-nb\") pod \"aac5dfa7-6274-4567-afad-2618de0eb3a0\" (UID: \"aac5dfa7-6274-4567-afad-2618de0eb3a0\") " Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.357914 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aac5dfa7-6274-4567-afad-2618de0eb3a0-kube-api-access-pxdvt" (OuterVolumeSpecName: "kube-api-access-pxdvt") pod "aac5dfa7-6274-4567-afad-2618de0eb3a0" (UID: "aac5dfa7-6274-4567-afad-2618de0eb3a0"). InnerVolumeSpecName "kube-api-access-pxdvt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.393332 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aac5dfa7-6274-4567-afad-2618de0eb3a0" (UID: "aac5dfa7-6274-4567-afad-2618de0eb3a0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.394285 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-config" (OuterVolumeSpecName: "config") pod "aac5dfa7-6274-4567-afad-2618de0eb3a0" (UID: "aac5dfa7-6274-4567-afad-2618de0eb3a0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.396722 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aac5dfa7-6274-4567-afad-2618de0eb3a0" (UID: "aac5dfa7-6274-4567-afad-2618de0eb3a0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.398055 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aac5dfa7-6274-4567-afad-2618de0eb3a0" (UID: "aac5dfa7-6274-4567-afad-2618de0eb3a0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.398362 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.456384 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.456744 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.456753 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.456765 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxdvt\" (UniqueName: \"kubernetes.io/projected/aac5dfa7-6274-4567-afad-2618de0eb3a0-kube-api-access-pxdvt\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.456774 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac5dfa7-6274-4567-afad-2618de0eb3a0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.650072 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-f6s5k"] Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.917902 4940 generic.go:334] "Generic (PLEG): container finished" podID="aac5dfa7-6274-4567-afad-2618de0eb3a0" containerID="0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220" exitCode=0 Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.918176 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.918210 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" event={"ID":"aac5dfa7-6274-4567-afad-2618de0eb3a0","Type":"ContainerDied","Data":"0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220"} Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.918778 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c476d78c5-4qjbj" event={"ID":"aac5dfa7-6274-4567-afad-2618de0eb3a0","Type":"ContainerDied","Data":"71a04b495b6f1f78eed58c7a0ef5b77160e97ec089770f06099ce30887ee4eb9"} Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.918804 4940 scope.go:117] "RemoveContainer" containerID="0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.922678 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" event={"ID":"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea","Type":"ContainerStarted","Data":"f00555917e21bb3d03c30ccb5f3fd5dcc046bf9e7b4651c9f55fd59fc4e5e5d4"} Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.922766 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.923972 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-f6s5k" event={"ID":"9b6691fc-2e13-47a2-86ff-cb5350301696","Type":"ContainerStarted","Data":"4ac49d99a033ef3e364f952b756ecaf6f320c8664d88ac027e5ce9941cdf90b2"} Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.941403 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" podStartSLOduration=2.9413832859999998 podStartE2EDuration="2.941383286s" podCreationTimestamp="2025-11-26 07:13:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:13.939025682 +0000 UTC m=+1095.459167301" watchObservedRunningTime="2025-11-26 07:13:13.941383286 +0000 UTC m=+1095.461524905" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.955049 4940 scope.go:117] "RemoveContainer" containerID="2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.957212 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c476d78c5-4qjbj"] Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.962854 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c476d78c5-4qjbj"] Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.971437 4940 scope.go:117] "RemoveContainer" containerID="0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220" Nov 26 07:13:13 crc kubenswrapper[4940]: E1126 07:13:13.972637 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220\": container with ID starting with 0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220 not found: ID does not exist" containerID="0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.972671 4940 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220"} err="failed to get container status \"0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220\": rpc error: code = NotFound desc = could not find container \"0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220\": container with ID starting with 0b723b2519b8e87eaa453dd4d86297d5c67863bde222fecd2da6bb88c0253220 not found: ID does not exist" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.972696 4940 scope.go:117] "RemoveContainer" containerID="2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c" Nov 26 07:13:13 crc kubenswrapper[4940]: E1126 07:13:13.973097 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c\": container with ID starting with 2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c not found: ID does not exist" containerID="2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c" Nov 26 07:13:13 crc kubenswrapper[4940]: I1126 07:13:13.973155 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c"} err="failed to get container status \"2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c\": rpc error: code = NotFound desc = could not find container \"2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c\": container with ID starting with 2a1e82f12be95b44e2024394dbdb0c01f3fe655d6d11457688e4d9c7d4e9302c not found: ID does not exist" Nov 26 07:13:14 crc kubenswrapper[4940]: I1126 07:13:14.168820 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:14 crc kubenswrapper[4940]: E1126 07:13:14.168997 4940 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:13:14 crc kubenswrapper[4940]: E1126 07:13:14.169019 4940 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:13:14 crc kubenswrapper[4940]: E1126 07:13:14.169079 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift podName:1ae63b19-f186-430b-87f0-d058d2efa83c nodeName:}" failed. No retries permitted until 2025-11-26 07:13:16.169060898 +0000 UTC m=+1097.689202517 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift") pod "swift-storage-0" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c") : configmap "swift-ring-files" not found Nov 26 07:13:15 crc kubenswrapper[4940]: I1126 07:13:15.176206 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aac5dfa7-6274-4567-afad-2618de0eb3a0" path="/var/lib/kubelet/pods/aac5dfa7-6274-4567-afad-2618de0eb3a0/volumes" Nov 26 07:13:16 crc kubenswrapper[4940]: I1126 07:13:16.199149 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:16 crc kubenswrapper[4940]: E1126 07:13:16.199370 4940 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:13:16 crc kubenswrapper[4940]: E1126 07:13:16.199389 4940 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:13:16 crc kubenswrapper[4940]: E1126 07:13:16.199448 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift podName:1ae63b19-f186-430b-87f0-d058d2efa83c nodeName:}" failed. No retries permitted until 2025-11-26 07:13:20.199434741 +0000 UTC m=+1101.719576360 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift") pod "swift-storage-0" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c") : configmap "swift-ring-files" not found Nov 26 07:13:16 crc kubenswrapper[4940]: I1126 07:13:16.963639 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-f6s5k" event={"ID":"9b6691fc-2e13-47a2-86ff-cb5350301696","Type":"ContainerStarted","Data":"314e4c503b3e92e3d40e785cc0e21ddd070fa647cdcb8a831a94293baea43f92"} Nov 26 07:13:16 crc kubenswrapper[4940]: I1126 07:13:16.981834 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-f6s5k" podStartSLOduration=2.034141978 podStartE2EDuration="4.981817005s" podCreationTimestamp="2025-11-26 07:13:12 +0000 UTC" firstStartedPulling="2025-11-26 07:13:13.6655917 +0000 UTC m=+1095.185733319" lastFinishedPulling="2025-11-26 07:13:16.613266717 +0000 UTC m=+1098.133408346" observedRunningTime="2025-11-26 07:13:16.977467578 +0000 UTC m=+1098.497609197" watchObservedRunningTime="2025-11-26 07:13:16.981817005 +0000 UTC m=+1098.501958624" Nov 26 07:13:17 crc kubenswrapper[4940]: I1126 07:13:17.512534 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" Nov 26 07:13:18 crc kubenswrapper[4940]: I1126 07:13:18.969698 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6b40-account-create-update-bch2t"] Nov 26 07:13:18 crc kubenswrapper[4940]: E1126 07:13:18.970481 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aac5dfa7-6274-4567-afad-2618de0eb3a0" containerName="init" Nov 26 07:13:18 crc kubenswrapper[4940]: I1126 07:13:18.970493 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="aac5dfa7-6274-4567-afad-2618de0eb3a0" containerName="init" Nov 26 07:13:18 crc 
kubenswrapper[4940]: E1126 07:13:18.970511 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aac5dfa7-6274-4567-afad-2618de0eb3a0" containerName="dnsmasq-dns" Nov 26 07:13:18 crc kubenswrapper[4940]: I1126 07:13:18.970517 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="aac5dfa7-6274-4567-afad-2618de0eb3a0" containerName="dnsmasq-dns" Nov 26 07:13:18 crc kubenswrapper[4940]: I1126 07:13:18.970855 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="aac5dfa7-6274-4567-afad-2618de0eb3a0" containerName="dnsmasq-dns" Nov 26 07:13:18 crc kubenswrapper[4940]: I1126 07:13:18.971402 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:18 crc kubenswrapper[4940]: I1126 07:13:18.983496 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.018498 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6b40-account-create-update-bch2t"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.040340 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-hnrhd"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.041736 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.047323 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af1db786-f3c4-4881-bd70-8be92ec0b24a-operator-scripts\") pod \"keystone-6b40-account-create-update-bch2t\" (UID: \"af1db786-f3c4-4881-bd70-8be92ec0b24a\") " pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.047394 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fb2242-083d-4a26-957a-0c4386c582c2-operator-scripts\") pod \"keystone-db-create-hnrhd\" (UID: \"a8fb2242-083d-4a26-957a-0c4386c582c2\") " pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.047434 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrwqs\" (UniqueName: \"kubernetes.io/projected/af1db786-f3c4-4881-bd70-8be92ec0b24a-kube-api-access-lrwqs\") pod \"keystone-6b40-account-create-update-bch2t\" (UID: \"af1db786-f3c4-4881-bd70-8be92ec0b24a\") " pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.047457 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8776f\" (UniqueName: \"kubernetes.io/projected/a8fb2242-083d-4a26-957a-0c4386c582c2-kube-api-access-8776f\") pod \"keystone-db-create-hnrhd\" (UID: \"a8fb2242-083d-4a26-957a-0c4386c582c2\") " pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.049826 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-hnrhd"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.148838 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrwqs\" (UniqueName: 
\"kubernetes.io/projected/af1db786-f3c4-4881-bd70-8be92ec0b24a-kube-api-access-lrwqs\") pod \"keystone-6b40-account-create-update-bch2t\" (UID: \"af1db786-f3c4-4881-bd70-8be92ec0b24a\") " pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.148894 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8776f\" (UniqueName: \"kubernetes.io/projected/a8fb2242-083d-4a26-957a-0c4386c582c2-kube-api-access-8776f\") pod \"keystone-db-create-hnrhd\" (UID: \"a8fb2242-083d-4a26-957a-0c4386c582c2\") " pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.149161 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af1db786-f3c4-4881-bd70-8be92ec0b24a-operator-scripts\") pod \"keystone-6b40-account-create-update-bch2t\" (UID: \"af1db786-f3c4-4881-bd70-8be92ec0b24a\") " pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.149252 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fb2242-083d-4a26-957a-0c4386c582c2-operator-scripts\") pod \"keystone-db-create-hnrhd\" (UID: \"a8fb2242-083d-4a26-957a-0c4386c582c2\") " pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.149987 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af1db786-f3c4-4881-bd70-8be92ec0b24a-operator-scripts\") pod \"keystone-6b40-account-create-update-bch2t\" (UID: \"af1db786-f3c4-4881-bd70-8be92ec0b24a\") " pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.150171 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fb2242-083d-4a26-957a-0c4386c582c2-operator-scripts\") pod \"keystone-db-create-hnrhd\" (UID: \"a8fb2242-083d-4a26-957a-0c4386c582c2\") " pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.169672 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrwqs\" (UniqueName: \"kubernetes.io/projected/af1db786-f3c4-4881-bd70-8be92ec0b24a-kube-api-access-lrwqs\") pod \"keystone-6b40-account-create-update-bch2t\" (UID: \"af1db786-f3c4-4881-bd70-8be92ec0b24a\") " pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.170643 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8776f\" (UniqueName: \"kubernetes.io/projected/a8fb2242-083d-4a26-957a-0c4386c582c2-kube-api-access-8776f\") pod \"keystone-db-create-hnrhd\" (UID: \"a8fb2242-083d-4a26-957a-0c4386c582c2\") " pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.208554 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-h6bf9"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.209877 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.217895 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-h6bf9"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.250825 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm45n\" (UniqueName: \"kubernetes.io/projected/f4b0f8bd-b95a-4d10-8747-11c0586a710c-kube-api-access-mm45n\") pod \"placement-db-create-h6bf9\" (UID: \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\") " pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.250902 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4b0f8bd-b95a-4d10-8747-11c0586a710c-operator-scripts\") pod \"placement-db-create-h6bf9\" (UID: \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\") " pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.308119 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-f807-account-create-update-7xz9t"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.309598 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.311505 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.316882 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f807-account-create-update-7xz9t"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.325554 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.352631 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mm45n\" (UniqueName: \"kubernetes.io/projected/f4b0f8bd-b95a-4d10-8747-11c0586a710c-kube-api-access-mm45n\") pod \"placement-db-create-h6bf9\" (UID: \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\") " pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.352730 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4b0f8bd-b95a-4d10-8747-11c0586a710c-operator-scripts\") pod \"placement-db-create-h6bf9\" (UID: \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\") " pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.369894 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm45n\" (UniqueName: \"kubernetes.io/projected/f4b0f8bd-b95a-4d10-8747-11c0586a710c-kube-api-access-mm45n\") pod \"placement-db-create-h6bf9\" (UID: \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\") " pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.371749 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.396922 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4b0f8bd-b95a-4d10-8747-11c0586a710c-operator-scripts\") pod \"placement-db-create-h6bf9\" (UID: \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\") " pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.454258 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50646a66-7d38-481f-9f16-e33c0de6ac84-operator-scripts\") pod \"placement-f807-account-create-update-7xz9t\" (UID: \"50646a66-7d38-481f-9f16-e33c0de6ac84\") " pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.455313 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcxqs\" (UniqueName: \"kubernetes.io/projected/50646a66-7d38-481f-9f16-e33c0de6ac84-kube-api-access-qcxqs\") pod \"placement-f807-account-create-update-7xz9t\" (UID: \"50646a66-7d38-481f-9f16-e33c0de6ac84\") " pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.512652 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-p24mk"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.513919 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-p24mk" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.541071 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-p24mk"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.556639 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxqs\" (UniqueName: \"kubernetes.io/projected/50646a66-7d38-481f-9f16-e33c0de6ac84-kube-api-access-qcxqs\") pod \"placement-f807-account-create-update-7xz9t\" (UID: \"50646a66-7d38-481f-9f16-e33c0de6ac84\") " pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.556973 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50646a66-7d38-481f-9f16-e33c0de6ac84-operator-scripts\") pod \"placement-f807-account-create-update-7xz9t\" (UID: \"50646a66-7d38-481f-9f16-e33c0de6ac84\") " pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.558467 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50646a66-7d38-481f-9f16-e33c0de6ac84-operator-scripts\") pod \"placement-f807-account-create-update-7xz9t\" (UID: \"50646a66-7d38-481f-9f16-e33c0de6ac84\") " pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.566482 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.573250 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcxqs\" (UniqueName: \"kubernetes.io/projected/50646a66-7d38-481f-9f16-e33c0de6ac84-kube-api-access-qcxqs\") pod \"placement-f807-account-create-update-7xz9t\" (UID: \"50646a66-7d38-481f-9f16-e33c0de6ac84\") " pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.617520 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-6413-account-create-update-xg5vm"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.620481 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.624378 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.624475 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.626924 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6413-account-create-update-xg5vm"] Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.658257 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-operator-scripts\") pod \"glance-db-create-p24mk\" (UID: \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\") " pod="openstack/glance-db-create-p24mk" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.659358 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp29v\" (UniqueName: \"kubernetes.io/projected/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-kube-api-access-bp29v\") pod \"glance-db-create-p24mk\" (UID: \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\") " pod="openstack/glance-db-create-p24mk" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.760737 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9twcf\" (UniqueName: \"kubernetes.io/projected/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-kube-api-access-9twcf\") pod \"glance-6413-account-create-update-xg5vm\" (UID: \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\") " pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.761256 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-operator-scripts\") pod \"glance-6413-account-create-update-xg5vm\" (UID: \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\") " pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.761420 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-operator-scripts\") pod \"glance-db-create-p24mk\" (UID: \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\") " pod="openstack/glance-db-create-p24mk" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.761582 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bp29v\" (UniqueName: \"kubernetes.io/projected/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-kube-api-access-bp29v\") pod \"glance-db-create-p24mk\" (UID: \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\") " pod="openstack/glance-db-create-p24mk" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.762606 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-operator-scripts\") pod \"glance-db-create-p24mk\" (UID: \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\") " pod="openstack/glance-db-create-p24mk" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.783678 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp29v\" (UniqueName: \"kubernetes.io/projected/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-kube-api-access-bp29v\") pod \"glance-db-create-p24mk\" (UID: \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\") " pod="openstack/glance-db-create-p24mk" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.806420 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6b40-account-create-update-bch2t"] Nov 26 07:13:19 crc kubenswrapper[4940]: W1126 07:13:19.816347 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf1db786_f3c4_4881_bd70_8be92ec0b24a.slice/crio-701fd99c60ebd5de2234b544a2a33db5179297564860bf8ccc218d2eb0f00531 WatchSource:0}: Error finding container 701fd99c60ebd5de2234b544a2a33db5179297564860bf8ccc218d2eb0f00531: Status 404 returned error can't find the container with id 701fd99c60ebd5de2234b544a2a33db5179297564860bf8ccc218d2eb0f00531 Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.861916 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-p24mk" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.862768 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-operator-scripts\") pod \"glance-6413-account-create-update-xg5vm\" (UID: \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\") " pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.862894 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9twcf\" (UniqueName: \"kubernetes.io/projected/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-kube-api-access-9twcf\") pod \"glance-6413-account-create-update-xg5vm\" (UID: \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\") " pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.865219 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-operator-scripts\") pod \"glance-6413-account-create-update-xg5vm\" (UID: \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\") " pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.890280 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9twcf\" (UniqueName: \"kubernetes.io/projected/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-kube-api-access-9twcf\") pod \"glance-6413-account-create-update-xg5vm\" (UID: \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\") " pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.894962 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-hnrhd"] Nov 26 07:13:19 crc kubenswrapper[4940]: W1126 07:13:19.898144 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8fb2242_083d_4a26_957a_0c4386c582c2.slice/crio-0f61e1fcebb35977d3f2a6ffcd886ecfc5b47231425525c64e8ac1ce9358196c WatchSource:0}: Error finding container 0f61e1fcebb35977d3f2a6ffcd886ecfc5b47231425525c64e8ac1ce9358196c: Status 404 returned error can't find the container with id 0f61e1fcebb35977d3f2a6ffcd886ecfc5b47231425525c64e8ac1ce9358196c Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.937444 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:19 crc kubenswrapper[4940]: I1126 07:13:19.991885 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-h6bf9"] Nov 26 07:13:20 crc kubenswrapper[4940]: I1126 07:13:20.003190 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6b40-account-create-update-bch2t" event={"ID":"af1db786-f3c4-4881-bd70-8be92ec0b24a","Type":"ContainerStarted","Data":"701fd99c60ebd5de2234b544a2a33db5179297564860bf8ccc218d2eb0f00531"} Nov 26 07:13:20 crc kubenswrapper[4940]: I1126 07:13:20.016105 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-hnrhd" event={"ID":"a8fb2242-083d-4a26-957a-0c4386c582c2","Type":"ContainerStarted","Data":"0f61e1fcebb35977d3f2a6ffcd886ecfc5b47231425525c64e8ac1ce9358196c"} Nov 26 07:13:20 crc kubenswrapper[4940]: I1126 07:13:20.080312 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f807-account-create-update-7xz9t"] Nov 26 07:13:20 crc kubenswrapper[4940]: W1126 07:13:20.108137 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50646a66_7d38_481f_9f16_e33c0de6ac84.slice/crio-5288163c12ad9981848b182bdcae4a0719d5f4cb0b2d9aa06305e976a21f4588 WatchSource:0}: Error finding container 5288163c12ad9981848b182bdcae4a0719d5f4cb0b2d9aa06305e976a21f4588: Status 404 returned error can't find the container with id 5288163c12ad9981848b182bdcae4a0719d5f4cb0b2d9aa06305e976a21f4588 Nov 26 07:13:20 crc kubenswrapper[4940]: I1126 07:13:20.273830 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:20 crc kubenswrapper[4940]: E1126 07:13:20.274015 4940 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:13:20 crc kubenswrapper[4940]: E1126 07:13:20.274076 4940 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:13:20 crc kubenswrapper[4940]: E1126 07:13:20.274139 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift podName:1ae63b19-f186-430b-87f0-d058d2efa83c nodeName:}" failed. No retries permitted until 2025-11-26 07:13:28.274119692 +0000 UTC m=+1109.794261311 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift") pod "swift-storage-0" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c") : configmap "swift-ring-files" not found Nov 26 07:13:20 crc kubenswrapper[4940]: I1126 07:13:20.334265 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-p24mk"] Nov 26 07:13:20 crc kubenswrapper[4940]: I1126 07:13:20.431330 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6413-account-create-update-xg5vm"] Nov 26 07:13:20 crc kubenswrapper[4940]: W1126 07:13:20.456078 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7a24b6b_c7c6_4710_a0a9_1f2730c7c333.slice/crio-0a83f5dfc909f815d1c62d9518bd49b71a56493b894283d704d7747a2c3a8a5a WatchSource:0}: Error finding container 0a83f5dfc909f815d1c62d9518bd49b71a56493b894283d704d7747a2c3a8a5a: Status 404 returned error can't find the container with id 0a83f5dfc909f815d1c62d9518bd49b71a56493b894283d704d7747a2c3a8a5a Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.029749 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h6bf9" event={"ID":"f4b0f8bd-b95a-4d10-8747-11c0586a710c","Type":"ContainerStarted","Data":"28e2a242740bd54e7e70c7584d5427f94678a06152e93baf68d358346b444d34"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.030338 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h6bf9" event={"ID":"f4b0f8bd-b95a-4d10-8747-11c0586a710c","Type":"ContainerStarted","Data":"26c00c7c62322eb8d05edbb9322f1ecbdbada7284633d679576ebd5e048acc3b"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.032491 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f807-account-create-update-7xz9t" event={"ID":"50646a66-7d38-481f-9f16-e33c0de6ac84","Type":"ContainerStarted","Data":"06eb10118d6f67ed17dc4673b5c0fe39fb295a586f54890566fc42f673a7a34f"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.032558 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f807-account-create-update-7xz9t" event={"ID":"50646a66-7d38-481f-9f16-e33c0de6ac84","Type":"ContainerStarted","Data":"5288163c12ad9981848b182bdcae4a0719d5f4cb0b2d9aa06305e976a21f4588"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.037667 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6413-account-create-update-xg5vm" event={"ID":"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333","Type":"ContainerStarted","Data":"bca968db600092021a60c505e8480f04c4ab1d2e4deffaa260a8603ed550dbac"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.037720 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6413-account-create-update-xg5vm" event={"ID":"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333","Type":"ContainerStarted","Data":"0a83f5dfc909f815d1c62d9518bd49b71a56493b894283d704d7747a2c3a8a5a"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.039770 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6b40-account-create-update-bch2t" event={"ID":"af1db786-f3c4-4881-bd70-8be92ec0b24a","Type":"ContainerStarted","Data":"af39f43206ed9e8c3359edcad03f5b9e9c03901dffc2f65deec2edf61fc17246"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.046297 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-hnrhd" 
event={"ID":"a8fb2242-083d-4a26-957a-0c4386c582c2","Type":"ContainerStarted","Data":"ad46d95e4b31516777f6595e28850d4fd7fe0cf1601ab7e6e62e24919711b1e3"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.048279 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-h6bf9" podStartSLOduration=2.048254485 podStartE2EDuration="2.048254485s" podCreationTimestamp="2025-11-26 07:13:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:21.045728874 +0000 UTC m=+1102.565870503" watchObservedRunningTime="2025-11-26 07:13:21.048254485 +0000 UTC m=+1102.568396114" Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.049020 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-p24mk" event={"ID":"57dd2efa-d7e3-4ea2-98be-7cdb13472a59","Type":"ContainerStarted","Data":"b6e397d9a1c3b9e439eaee34e7f6ab2d5cca1c4d5672b6ac2c308c39f31ba662"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.049087 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-p24mk" event={"ID":"57dd2efa-d7e3-4ea2-98be-7cdb13472a59","Type":"ContainerStarted","Data":"212cb02cb2b38681f24bf7e206776182390229ce8708b379af336fcad9b99ed8"} Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.066285 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6b40-account-create-update-bch2t" podStartSLOduration=3.066270385 podStartE2EDuration="3.066270385s" podCreationTimestamp="2025-11-26 07:13:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:21.059018776 +0000 UTC m=+1102.579160425" watchObservedRunningTime="2025-11-26 07:13:21.066270385 +0000 UTC m=+1102.586412004" Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.080591 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-6413-account-create-update-xg5vm" podStartSLOduration=2.080562399 podStartE2EDuration="2.080562399s" podCreationTimestamp="2025-11-26 07:13:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:21.076252933 +0000 UTC m=+1102.596394552" watchObservedRunningTime="2025-11-26 07:13:21.080562399 +0000 UTC m=+1102.600704028" Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.093508 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-f807-account-create-update-7xz9t" podStartSLOduration=2.093490198 podStartE2EDuration="2.093490198s" podCreationTimestamp="2025-11-26 07:13:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:21.088736708 +0000 UTC m=+1102.608878337" watchObservedRunningTime="2025-11-26 07:13:21.093490198 +0000 UTC m=+1102.613631817" Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.114941 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-p24mk" podStartSLOduration=2.114920869 podStartE2EDuration="2.114920869s" podCreationTimestamp="2025-11-26 07:13:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:21.105238381 +0000 UTC 
m=+1102.625380000" watchObservedRunningTime="2025-11-26 07:13:21.114920869 +0000 UTC m=+1102.635062488" Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.124906 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-hnrhd" podStartSLOduration=2.124889725 podStartE2EDuration="2.124889725s" podCreationTimestamp="2025-11-26 07:13:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:21.123391277 +0000 UTC m=+1102.643532966" watchObservedRunningTime="2025-11-26 07:13:21.124889725 +0000 UTC m=+1102.645031344" Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.655338 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.720733 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"] Nov 26 07:13:21 crc kubenswrapper[4940]: I1126 07:13:21.720991 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" podUID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" containerName="dnsmasq-dns" containerID="cri-o://1631d07fcde213652d1c9ce10c41093a207924afd6347945441595061386bf12" gracePeriod=10 Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.059549 4940 generic.go:334] "Generic (PLEG): container finished" podID="50646a66-7d38-481f-9f16-e33c0de6ac84" containerID="06eb10118d6f67ed17dc4673b5c0fe39fb295a586f54890566fc42f673a7a34f" exitCode=0 Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.059698 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f807-account-create-update-7xz9t" event={"ID":"50646a66-7d38-481f-9f16-e33c0de6ac84","Type":"ContainerDied","Data":"06eb10118d6f67ed17dc4673b5c0fe39fb295a586f54890566fc42f673a7a34f"} Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.065340 4940 generic.go:334] "Generic (PLEG): container finished" podID="d7a24b6b-c7c6-4710-a0a9-1f2730c7c333" containerID="bca968db600092021a60c505e8480f04c4ab1d2e4deffaa260a8603ed550dbac" exitCode=0 Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.065390 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6413-account-create-update-xg5vm" event={"ID":"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333","Type":"ContainerDied","Data":"bca968db600092021a60c505e8480f04c4ab1d2e4deffaa260a8603ed550dbac"} Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.069350 4940 generic.go:334] "Generic (PLEG): container finished" podID="af1db786-f3c4-4881-bd70-8be92ec0b24a" containerID="af39f43206ed9e8c3359edcad03f5b9e9c03901dffc2f65deec2edf61fc17246" exitCode=0 Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.069442 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6b40-account-create-update-bch2t" event={"ID":"af1db786-f3c4-4881-bd70-8be92ec0b24a","Type":"ContainerDied","Data":"af39f43206ed9e8c3359edcad03f5b9e9c03901dffc2f65deec2edf61fc17246"} Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.077551 4940 generic.go:334] "Generic (PLEG): container finished" podID="a8fb2242-083d-4a26-957a-0c4386c582c2" containerID="ad46d95e4b31516777f6595e28850d4fd7fe0cf1601ab7e6e62e24919711b1e3" exitCode=0 Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.077648 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-hnrhd" 
event={"ID":"a8fb2242-083d-4a26-957a-0c4386c582c2","Type":"ContainerDied","Data":"ad46d95e4b31516777f6595e28850d4fd7fe0cf1601ab7e6e62e24919711b1e3"} Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.083707 4940 generic.go:334] "Generic (PLEG): container finished" podID="57dd2efa-d7e3-4ea2-98be-7cdb13472a59" containerID="b6e397d9a1c3b9e439eaee34e7f6ab2d5cca1c4d5672b6ac2c308c39f31ba662" exitCode=0 Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.083850 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-p24mk" event={"ID":"57dd2efa-d7e3-4ea2-98be-7cdb13472a59","Type":"ContainerDied","Data":"b6e397d9a1c3b9e439eaee34e7f6ab2d5cca1c4d5672b6ac2c308c39f31ba662"} Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.086600 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b0f8bd-b95a-4d10-8747-11c0586a710c" containerID="28e2a242740bd54e7e70c7584d5427f94678a06152e93baf68d358346b444d34" exitCode=0 Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.086670 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h6bf9" event={"ID":"f4b0f8bd-b95a-4d10-8747-11c0586a710c","Type":"ContainerDied","Data":"28e2a242740bd54e7e70c7584d5427f94678a06152e93baf68d358346b444d34"} Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.095482 4940 generic.go:334] "Generic (PLEG): container finished" podID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" containerID="1631d07fcde213652d1c9ce10c41093a207924afd6347945441595061386bf12" exitCode=0 Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.095525 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" event={"ID":"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5","Type":"ContainerDied","Data":"1631d07fcde213652d1c9ce10c41093a207924afd6347945441595061386bf12"} Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.184980 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.313144 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-ovsdbserver-sb\") pod \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.313228 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-config\") pod \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.313271 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nl97d\" (UniqueName: \"kubernetes.io/projected/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-kube-api-access-nl97d\") pod \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.313322 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-dns-svc\") pod \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\" (UID: \"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5\") " Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.337101 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-kube-api-access-nl97d" (OuterVolumeSpecName: "kube-api-access-nl97d") pod "2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" (UID: "2a12a421-4a7d-4a34-a986-5dd3fa5edfb5"). InnerVolumeSpecName "kube-api-access-nl97d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.368398 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-config" (OuterVolumeSpecName: "config") pod "2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" (UID: "2a12a421-4a7d-4a34-a986-5dd3fa5edfb5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.369518 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" (UID: "2a12a421-4a7d-4a34-a986-5dd3fa5edfb5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.372297 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" (UID: "2a12a421-4a7d-4a34-a986-5dd3fa5edfb5"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.415063 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.415103 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.415114 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nl97d\" (UniqueName: \"kubernetes.io/projected/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-kube-api-access-nl97d\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:22 crc kubenswrapper[4940]: I1126 07:13:22.415125 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.111575 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" event={"ID":"2a12a421-4a7d-4a34-a986-5dd3fa5edfb5","Type":"ContainerDied","Data":"b6fb3a03307797491f06b578c3bfe79571ab62f6e98586f59ca06cd6f909d9cb"} Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.112117 4940 scope.go:117] "RemoveContainer" containerID="1631d07fcde213652d1c9ce10c41093a207924afd6347945441595061386bf12" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.111852 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c9b8d4f7-lzxd9" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.164792 4940 scope.go:117] "RemoveContainer" containerID="3d6b083008cb90afe12933358b3febdfa89deef3f98dee35d88fde72ce0df2c4" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.181264 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"] Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.181310 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-65c9b8d4f7-lzxd9"] Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.471008 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.566632 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.672791 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.682766 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-p24mk" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.706926 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.727809 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.729064 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.750212 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fb2242-083d-4a26-957a-0c4386c582c2-operator-scripts\") pod \"a8fb2242-083d-4a26-957a-0c4386c582c2\" (UID: \"a8fb2242-083d-4a26-957a-0c4386c582c2\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.750512 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8776f\" (UniqueName: \"kubernetes.io/projected/a8fb2242-083d-4a26-957a-0c4386c582c2-kube-api-access-8776f\") pod \"a8fb2242-083d-4a26-957a-0c4386c582c2\" (UID: \"a8fb2242-083d-4a26-957a-0c4386c582c2\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.750663 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8fb2242-083d-4a26-957a-0c4386c582c2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a8fb2242-083d-4a26-957a-0c4386c582c2" (UID: "a8fb2242-083d-4a26-957a-0c4386c582c2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.751105 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fb2242-083d-4a26-957a-0c4386c582c2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.760925 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8fb2242-083d-4a26-957a-0c4386c582c2-kube-api-access-8776f" (OuterVolumeSpecName: "kube-api-access-8776f") pod "a8fb2242-083d-4a26-957a-0c4386c582c2" (UID: "a8fb2242-083d-4a26-957a-0c4386c582c2"). InnerVolumeSpecName "kube-api-access-8776f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852630 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af1db786-f3c4-4881-bd70-8be92ec0b24a-operator-scripts\") pod \"af1db786-f3c4-4881-bd70-8be92ec0b24a\" (UID: \"af1db786-f3c4-4881-bd70-8be92ec0b24a\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852744 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcxqs\" (UniqueName: \"kubernetes.io/projected/50646a66-7d38-481f-9f16-e33c0de6ac84-kube-api-access-qcxqs\") pod \"50646a66-7d38-481f-9f16-e33c0de6ac84\" (UID: \"50646a66-7d38-481f-9f16-e33c0de6ac84\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852775 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrwqs\" (UniqueName: \"kubernetes.io/projected/af1db786-f3c4-4881-bd70-8be92ec0b24a-kube-api-access-lrwqs\") pod \"af1db786-f3c4-4881-bd70-8be92ec0b24a\" (UID: \"af1db786-f3c4-4881-bd70-8be92ec0b24a\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852822 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-operator-scripts\") pod \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\" (UID: \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852848 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50646a66-7d38-481f-9f16-e33c0de6ac84-operator-scripts\") pod \"50646a66-7d38-481f-9f16-e33c0de6ac84\" (UID: \"50646a66-7d38-481f-9f16-e33c0de6ac84\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852900 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9twcf\" (UniqueName: \"kubernetes.io/projected/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-kube-api-access-9twcf\") pod \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\" (UID: \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852916 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-operator-scripts\") pod \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\" (UID: \"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852945 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mm45n\" (UniqueName: \"kubernetes.io/projected/f4b0f8bd-b95a-4d10-8747-11c0586a710c-kube-api-access-mm45n\") pod \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\" (UID: \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.852995 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4b0f8bd-b95a-4d10-8747-11c0586a710c-operator-scripts\") pod \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\" (UID: \"f4b0f8bd-b95a-4d10-8747-11c0586a710c\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.853015 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bp29v\" (UniqueName: 
\"kubernetes.io/projected/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-kube-api-access-bp29v\") pod \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\" (UID: \"57dd2efa-d7e3-4ea2-98be-7cdb13472a59\") " Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.853335 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8776f\" (UniqueName: \"kubernetes.io/projected/a8fb2242-083d-4a26-957a-0c4386c582c2-kube-api-access-8776f\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.853365 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d7a24b6b-c7c6-4710-a0a9-1f2730c7c333" (UID: "d7a24b6b-c7c6-4710-a0a9-1f2730c7c333"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.853364 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50646a66-7d38-481f-9f16-e33c0de6ac84-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "50646a66-7d38-481f-9f16-e33c0de6ac84" (UID: "50646a66-7d38-481f-9f16-e33c0de6ac84"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.853828 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af1db786-f3c4-4881-bd70-8be92ec0b24a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "af1db786-f3c4-4881-bd70-8be92ec0b24a" (UID: "af1db786-f3c4-4881-bd70-8be92ec0b24a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.854106 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4b0f8bd-b95a-4d10-8747-11c0586a710c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f4b0f8bd-b95a-4d10-8747-11c0586a710c" (UID: "f4b0f8bd-b95a-4d10-8747-11c0586a710c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.854406 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "57dd2efa-d7e3-4ea2-98be-7cdb13472a59" (UID: "57dd2efa-d7e3-4ea2-98be-7cdb13472a59"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.858443 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-kube-api-access-9twcf" (OuterVolumeSpecName: "kube-api-access-9twcf") pod "d7a24b6b-c7c6-4710-a0a9-1f2730c7c333" (UID: "d7a24b6b-c7c6-4710-a0a9-1f2730c7c333"). InnerVolumeSpecName "kube-api-access-9twcf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.858487 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-kube-api-access-bp29v" (OuterVolumeSpecName: "kube-api-access-bp29v") pod "57dd2efa-d7e3-4ea2-98be-7cdb13472a59" (UID: "57dd2efa-d7e3-4ea2-98be-7cdb13472a59"). 
InnerVolumeSpecName "kube-api-access-bp29v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.858514 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4b0f8bd-b95a-4d10-8747-11c0586a710c-kube-api-access-mm45n" (OuterVolumeSpecName: "kube-api-access-mm45n") pod "f4b0f8bd-b95a-4d10-8747-11c0586a710c" (UID: "f4b0f8bd-b95a-4d10-8747-11c0586a710c"). InnerVolumeSpecName "kube-api-access-mm45n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.858560 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50646a66-7d38-481f-9f16-e33c0de6ac84-kube-api-access-qcxqs" (OuterVolumeSpecName: "kube-api-access-qcxqs") pod "50646a66-7d38-481f-9f16-e33c0de6ac84" (UID: "50646a66-7d38-481f-9f16-e33c0de6ac84"). InnerVolumeSpecName "kube-api-access-qcxqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.861298 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af1db786-f3c4-4881-bd70-8be92ec0b24a-kube-api-access-lrwqs" (OuterVolumeSpecName: "kube-api-access-lrwqs") pod "af1db786-f3c4-4881-bd70-8be92ec0b24a" (UID: "af1db786-f3c4-4881-bd70-8be92ec0b24a"). InnerVolumeSpecName "kube-api-access-lrwqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955198 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af1db786-f3c4-4881-bd70-8be92ec0b24a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955420 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qcxqs\" (UniqueName: \"kubernetes.io/projected/50646a66-7d38-481f-9f16-e33c0de6ac84-kube-api-access-qcxqs\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955524 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrwqs\" (UniqueName: \"kubernetes.io/projected/af1db786-f3c4-4881-bd70-8be92ec0b24a-kube-api-access-lrwqs\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955594 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955651 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50646a66-7d38-481f-9f16-e33c0de6ac84-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955706 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955769 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9twcf\" (UniqueName: \"kubernetes.io/projected/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333-kube-api-access-9twcf\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955824 4940 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-mm45n\" (UniqueName: \"kubernetes.io/projected/f4b0f8bd-b95a-4d10-8747-11c0586a710c-kube-api-access-mm45n\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955885 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4b0f8bd-b95a-4d10-8747-11c0586a710c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:23 crc kubenswrapper[4940]: I1126 07:13:23.955940 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bp29v\" (UniqueName: \"kubernetes.io/projected/57dd2efa-d7e3-4ea2-98be-7cdb13472a59-kube-api-access-bp29v\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.122675 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f807-account-create-update-7xz9t" event={"ID":"50646a66-7d38-481f-9f16-e33c0de6ac84","Type":"ContainerDied","Data":"5288163c12ad9981848b182bdcae4a0719d5f4cb0b2d9aa06305e976a21f4588"} Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.122723 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5288163c12ad9981848b182bdcae4a0719d5f4cb0b2d9aa06305e976a21f4588" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.122749 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f807-account-create-update-7xz9t" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.124175 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6413-account-create-update-xg5vm" event={"ID":"d7a24b6b-c7c6-4710-a0a9-1f2730c7c333","Type":"ContainerDied","Data":"0a83f5dfc909f815d1c62d9518bd49b71a56493b894283d704d7747a2c3a8a5a"} Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.124228 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a83f5dfc909f815d1c62d9518bd49b71a56493b894283d704d7747a2c3a8a5a" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.124197 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6413-account-create-update-xg5vm" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.126170 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6b40-account-create-update-bch2t" event={"ID":"af1db786-f3c4-4881-bd70-8be92ec0b24a","Type":"ContainerDied","Data":"701fd99c60ebd5de2234b544a2a33db5179297564860bf8ccc218d2eb0f00531"} Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.126194 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="701fd99c60ebd5de2234b544a2a33db5179297564860bf8ccc218d2eb0f00531" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.126199 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6b40-account-create-update-bch2t" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.140781 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-hnrhd" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.140782 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-hnrhd" event={"ID":"a8fb2242-083d-4a26-957a-0c4386c582c2","Type":"ContainerDied","Data":"0f61e1fcebb35977d3f2a6ffcd886ecfc5b47231425525c64e8ac1ce9358196c"} Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.141384 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f61e1fcebb35977d3f2a6ffcd886ecfc5b47231425525c64e8ac1ce9358196c" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.146104 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-p24mk" event={"ID":"57dd2efa-d7e3-4ea2-98be-7cdb13472a59","Type":"ContainerDied","Data":"212cb02cb2b38681f24bf7e206776182390229ce8708b379af336fcad9b99ed8"} Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.146151 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="212cb02cb2b38681f24bf7e206776182390229ce8708b379af336fcad9b99ed8" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.146194 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-p24mk" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.149310 4940 generic.go:334] "Generic (PLEG): container finished" podID="9b6691fc-2e13-47a2-86ff-cb5350301696" containerID="314e4c503b3e92e3d40e785cc0e21ddd070fa647cdcb8a831a94293baea43f92" exitCode=0 Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.149434 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-f6s5k" event={"ID":"9b6691fc-2e13-47a2-86ff-cb5350301696","Type":"ContainerDied","Data":"314e4c503b3e92e3d40e785cc0e21ddd070fa647cdcb8a831a94293baea43f92"} Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.153335 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h6bf9" event={"ID":"f4b0f8bd-b95a-4d10-8747-11c0586a710c","Type":"ContainerDied","Data":"26c00c7c62322eb8d05edbb9322f1ecbdbada7284633d679576ebd5e048acc3b"} Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.153578 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26c00c7c62322eb8d05edbb9322f1ecbdbada7284633d679576ebd5e048acc3b" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.153742 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-h6bf9" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.780583 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-nv2vz"] Nov 26 07:13:24 crc kubenswrapper[4940]: E1126 07:13:24.780943 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8fb2242-083d-4a26-957a-0c4386c582c2" containerName="mariadb-database-create" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.780957 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8fb2242-083d-4a26-957a-0c4386c582c2" containerName="mariadb-database-create" Nov 26 07:13:24 crc kubenswrapper[4940]: E1126 07:13:24.780972 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b0f8bd-b95a-4d10-8747-11c0586a710c" containerName="mariadb-database-create" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.780982 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b0f8bd-b95a-4d10-8747-11c0586a710c" containerName="mariadb-database-create" Nov 26 07:13:24 crc kubenswrapper[4940]: E1126 07:13:24.780998 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57dd2efa-d7e3-4ea2-98be-7cdb13472a59" containerName="mariadb-database-create" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781006 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="57dd2efa-d7e3-4ea2-98be-7cdb13472a59" containerName="mariadb-database-create" Nov 26 07:13:24 crc kubenswrapper[4940]: E1126 07:13:24.781026 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af1db786-f3c4-4881-bd70-8be92ec0b24a" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781050 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="af1db786-f3c4-4881-bd70-8be92ec0b24a" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: E1126 07:13:24.781068 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7a24b6b-c7c6-4710-a0a9-1f2730c7c333" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781076 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7a24b6b-c7c6-4710-a0a9-1f2730c7c333" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: E1126 07:13:24.781092 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" containerName="dnsmasq-dns" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781101 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" containerName="dnsmasq-dns" Nov 26 07:13:24 crc kubenswrapper[4940]: E1126 07:13:24.781119 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" containerName="init" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781127 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" containerName="init" Nov 26 07:13:24 crc kubenswrapper[4940]: E1126 07:13:24.781137 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50646a66-7d38-481f-9f16-e33c0de6ac84" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781145 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="50646a66-7d38-481f-9f16-e33c0de6ac84" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 
Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781368 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="50646a66-7d38-481f-9f16-e33c0de6ac84" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781379 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8fb2242-083d-4a26-957a-0c4386c582c2" containerName="mariadb-database-create" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781392 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="af1db786-f3c4-4881-bd70-8be92ec0b24a" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781409 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="57dd2efa-d7e3-4ea2-98be-7cdb13472a59" containerName="mariadb-database-create" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781419 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" containerName="dnsmasq-dns" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.781428 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7a24b6b-c7c6-4710-a0a9-1f2730c7c333" containerName="mariadb-account-create-update" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.782054 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.787994 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.788747 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-q9x2b" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.800845 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-nv2vz"] Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.972985 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-combined-ca-bundle\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.973103 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97ljg\" (UniqueName: \"kubernetes.io/projected/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-kube-api-access-97ljg\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.973147 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-config-data\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:24 crc kubenswrapper[4940]: I1126 07:13:24.973446 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName:
\"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-db-sync-config-data\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.074856 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-db-sync-config-data\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.075276 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-combined-ca-bundle\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.075310 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97ljg\" (UniqueName: \"kubernetes.io/projected/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-kube-api-access-97ljg\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.075343 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-config-data\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.081616 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-db-sync-config-data\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.081725 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-config-data\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.082432 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-combined-ca-bundle\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.101982 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97ljg\" (UniqueName: \"kubernetes.io/projected/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-kube-api-access-97ljg\") pod \"glance-db-sync-nv2vz\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.108069 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.181226 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a12a421-4a7d-4a34-a986-5dd3fa5edfb5" path="/var/lib/kubelet/pods/2a12a421-4a7d-4a34-a986-5dd3fa5edfb5/volumes" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.589101 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.682299 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-swiftconf\") pod \"9b6691fc-2e13-47a2-86ff-cb5350301696\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.682413 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-combined-ca-bundle\") pod \"9b6691fc-2e13-47a2-86ff-cb5350301696\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.682457 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-dispersionconf\") pod \"9b6691fc-2e13-47a2-86ff-cb5350301696\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.682513 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-ring-data-devices\") pod \"9b6691fc-2e13-47a2-86ff-cb5350301696\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.682549 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmt77\" (UniqueName: \"kubernetes.io/projected/9b6691fc-2e13-47a2-86ff-cb5350301696-kube-api-access-jmt77\") pod \"9b6691fc-2e13-47a2-86ff-cb5350301696\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.682579 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b6691fc-2e13-47a2-86ff-cb5350301696-etc-swift\") pod \"9b6691fc-2e13-47a2-86ff-cb5350301696\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.682604 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-scripts\") pod \"9b6691fc-2e13-47a2-86ff-cb5350301696\" (UID: \"9b6691fc-2e13-47a2-86ff-cb5350301696\") " Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.683375 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "9b6691fc-2e13-47a2-86ff-cb5350301696" (UID: "9b6691fc-2e13-47a2-86ff-cb5350301696"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.683548 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b6691fc-2e13-47a2-86ff-cb5350301696-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "9b6691fc-2e13-47a2-86ff-cb5350301696" (UID: "9b6691fc-2e13-47a2-86ff-cb5350301696"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.688363 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b6691fc-2e13-47a2-86ff-cb5350301696-kube-api-access-jmt77" (OuterVolumeSpecName: "kube-api-access-jmt77") pod "9b6691fc-2e13-47a2-86ff-cb5350301696" (UID: "9b6691fc-2e13-47a2-86ff-cb5350301696"). InnerVolumeSpecName "kube-api-access-jmt77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.695541 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "9b6691fc-2e13-47a2-86ff-cb5350301696" (UID: "9b6691fc-2e13-47a2-86ff-cb5350301696"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.709749 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-nv2vz"] Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.721973 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "9b6691fc-2e13-47a2-86ff-cb5350301696" (UID: "9b6691fc-2e13-47a2-86ff-cb5350301696"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.724263 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-scripts" (OuterVolumeSpecName: "scripts") pod "9b6691fc-2e13-47a2-86ff-cb5350301696" (UID: "9b6691fc-2e13-47a2-86ff-cb5350301696"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.724599 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b6691fc-2e13-47a2-86ff-cb5350301696" (UID: "9b6691fc-2e13-47a2-86ff-cb5350301696"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.784735 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.784771 4940 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.784793 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.784807 4940 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b6691fc-2e13-47a2-86ff-cb5350301696-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.784817 4940 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b6691fc-2e13-47a2-86ff-cb5350301696-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.784826 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmt77\" (UniqueName: \"kubernetes.io/projected/9b6691fc-2e13-47a2-86ff-cb5350301696-kube-api-access-jmt77\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:25 crc kubenswrapper[4940]: I1126 07:13:25.784834 4940 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b6691fc-2e13-47a2-86ff-cb5350301696-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:26 crc kubenswrapper[4940]: I1126 07:13:26.176213 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nv2vz" event={"ID":"0c3c0935-a87e-4709-a468-f00d5e8b3ee1","Type":"ContainerStarted","Data":"24ccb2e4b7d32ad415e8ad95e2af98d638e2dc182fd6abd002050f265ec1022b"} Nov 26 07:13:26 crc kubenswrapper[4940]: I1126 07:13:26.178002 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-f6s5k" event={"ID":"9b6691fc-2e13-47a2-86ff-cb5350301696","Type":"ContainerDied","Data":"4ac49d99a033ef3e364f952b756ecaf6f320c8664d88ac027e5ce9941cdf90b2"} Nov 26 07:13:26 crc kubenswrapper[4940]: I1126 07:13:26.178059 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ac49d99a033ef3e364f952b756ecaf6f320c8664d88ac027e5ce9941cdf90b2" Nov 26 07:13:26 crc kubenswrapper[4940]: I1126 07:13:26.178131 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-f6s5k" Nov 26 07:13:27 crc kubenswrapper[4940]: I1126 07:13:27.187974 4940 generic.go:334] "Generic (PLEG): container finished" podID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerID="88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493" exitCode=0 Nov 26 07:13:27 crc kubenswrapper[4940]: I1126 07:13:27.188138 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"11a17576-9a94-4e2d-8915-9d838de09f0b","Type":"ContainerDied","Data":"88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493"} Nov 26 07:13:27 crc kubenswrapper[4940]: I1126 07:13:27.191174 4940 generic.go:334] "Generic (PLEG): container finished" podID="69972749-03ff-48e9-b031-99c33ce86e96" containerID="8e9e90c1c11a39ed3cb269a3561acbf16d889c8ed343e1427b9f27c9d40de9c6" exitCode=0 Nov 26 07:13:27 crc kubenswrapper[4940]: I1126 07:13:27.191210 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"69972749-03ff-48e9-b031-99c33ce86e96","Type":"ContainerDied","Data":"8e9e90c1c11a39ed3cb269a3561acbf16d889c8ed343e1427b9f27c9d40de9c6"} Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.200016 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"69972749-03ff-48e9-b031-99c33ce86e96","Type":"ContainerStarted","Data":"ffc8d224e6ee06035af2a49a3dfbb96ff41fdb30dc4fc3b71983a00df2b005c0"} Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.200723 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.203230 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"11a17576-9a94-4e2d-8915-9d838de09f0b","Type":"ContainerStarted","Data":"ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445"} Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.203411 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.226548 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.070106593 podStartE2EDuration="54.226529166s" podCreationTimestamp="2025-11-26 07:12:34 +0000 UTC" firstStartedPulling="2025-11-26 07:12:36.262212243 +0000 UTC m=+1057.782353862" lastFinishedPulling="2025-11-26 07:12:51.418634816 +0000 UTC m=+1072.938776435" observedRunningTime="2025-11-26 07:13:28.218179791 +0000 UTC m=+1109.738321410" watchObservedRunningTime="2025-11-26 07:13:28.226529166 +0000 UTC m=+1109.746670775" Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.249860 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.531346173 podStartE2EDuration="54.249843306s" podCreationTimestamp="2025-11-26 07:12:34 +0000 UTC" firstStartedPulling="2025-11-26 07:12:36.752815493 +0000 UTC m=+1058.272957112" lastFinishedPulling="2025-11-26 07:12:51.471312626 +0000 UTC m=+1072.991454245" observedRunningTime="2025-11-26 07:13:28.248322498 +0000 UTC m=+1109.768464137" watchObservedRunningTime="2025-11-26 07:13:28.249843306 +0000 UTC m=+1109.769984925" Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.329026 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.334410 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"swift-storage-0\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " pod="openstack/swift-storage-0" Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.356090 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 07:13:28 crc kubenswrapper[4940]: I1126 07:13:28.855057 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 07:13:28 crc kubenswrapper[4940]: W1126 07:13:28.872405 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ae63b19_f186_430b_87f0_d058d2efa83c.slice/crio-4a2662f8ac9a95a04c533b91be7382a4f479563951aae471c80a7d6a5240e657 WatchSource:0}: Error finding container 4a2662f8ac9a95a04c533b91be7382a4f479563951aae471c80a7d6a5240e657: Status 404 returned error can't find the container with id 4a2662f8ac9a95a04c533b91be7382a4f479563951aae471c80a7d6a5240e657 Nov 26 07:13:29 crc kubenswrapper[4940]: I1126 07:13:29.210652 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"4a2662f8ac9a95a04c533b91be7382a4f479563951aae471c80a7d6a5240e657"} Nov 26 07:13:31 crc kubenswrapper[4940]: I1126 07:13:31.227782 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"1fb92f00872a9aa36d49326d1cf65db8a9032a280f9587ed0d9216aef9800d95"} Nov 26 07:13:31 crc kubenswrapper[4940]: I1126 07:13:31.228583 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"5832f00fdc7b07d3b583da1f514fceb0172f7918ac8ced3a03dda26a1c0934ea"} Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.615022 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-78r7g" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" containerName="ovn-controller" probeResult="failure" output=< Nov 26 07:13:35 crc kubenswrapper[4940]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 07:13:35 crc kubenswrapper[4940]: > Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.654599 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.665550 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.879141 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-78r7g-config-l7krz"] Nov 26 07:13:35 crc kubenswrapper[4940]: E1126 07:13:35.879605 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b6691fc-2e13-47a2-86ff-cb5350301696" containerName="swift-ring-rebalance" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.879623 4940 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="9b6691fc-2e13-47a2-86ff-cb5350301696" containerName="swift-ring-rebalance" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.879868 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b6691fc-2e13-47a2-86ff-cb5350301696" containerName="swift-ring-rebalance" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.880628 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.883034 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.899341 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-78r7g-config-l7krz"] Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.962067 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.962104 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run-ovn\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.962190 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-additional-scripts\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.962226 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sgwc\" (UniqueName: \"kubernetes.io/projected/e67022db-0c7d-4197-9deb-a5deabd14849-kube-api-access-9sgwc\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.962297 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-log-ovn\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:35 crc kubenswrapper[4940]: I1126 07:13:35.962348 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-scripts\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063390 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-log-ovn\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063475 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-scripts\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063600 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run-ovn\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063618 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063645 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-additional-scripts\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063718 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sgwc\" (UniqueName: \"kubernetes.io/projected/e67022db-0c7d-4197-9deb-a5deabd14849-kube-api-access-9sgwc\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063762 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-log-ovn\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063830 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.063866 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run-ovn\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.065207 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-additional-scripts\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.065752 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-scripts\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.085920 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sgwc\" (UniqueName: \"kubernetes.io/projected/e67022db-0c7d-4197-9deb-a5deabd14849-kube-api-access-9sgwc\") pod \"ovn-controller-78r7g-config-l7krz\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:36 crc kubenswrapper[4940]: I1126 07:13:36.254072 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:39 crc kubenswrapper[4940]: I1126 07:13:39.300542 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"7a9b2378bf609eb570091f1edebe633dcf651482009fd78a7715a03ed6c3da04"} Nov 26 07:13:39 crc kubenswrapper[4940]: I1126 07:13:39.309235 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-78r7g-config-l7krz"] Nov 26 07:13:40 crc kubenswrapper[4940]: I1126 07:13:40.309998 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"7d1c1ce5f4f86fefb0522c2b4bb84960ef9691ba82d0c28a857ae9348d2ead68"} Nov 26 07:13:40 crc kubenswrapper[4940]: I1126 07:13:40.311464 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nv2vz" event={"ID":"0c3c0935-a87e-4709-a468-f00d5e8b3ee1","Type":"ContainerStarted","Data":"ac0e35b82f07eb7f9962a21908841a4ad2dc0d4537b49b3d218f50d447c56cc2"} Nov 26 07:13:40 crc kubenswrapper[4940]: I1126 07:13:40.312730 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g-config-l7krz" event={"ID":"e67022db-0c7d-4197-9deb-a5deabd14849","Type":"ContainerDied","Data":"a219095e440f0eedb95f44ff7b1916f2f087302b42b8e8e4c92e06a7d851bbeb"} Nov 26 07:13:40 crc kubenswrapper[4940]: I1126 07:13:40.312743 4940 generic.go:334] "Generic (PLEG): container finished" podID="e67022db-0c7d-4197-9deb-a5deabd14849" containerID="a219095e440f0eedb95f44ff7b1916f2f087302b42b8e8e4c92e06a7d851bbeb" exitCode=0 Nov 26 07:13:40 crc kubenswrapper[4940]: I1126 07:13:40.312802 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g-config-l7krz" event={"ID":"e67022db-0c7d-4197-9deb-a5deabd14849","Type":"ContainerStarted","Data":"a52e97700d3591f914afc1b1d6b9af918011f9934a1c8f458ee370fc2b3da2fb"} Nov 26 07:13:40 crc kubenswrapper[4940]: I1126 07:13:40.332580 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-nv2vz" podStartSLOduration=3.120026053 podStartE2EDuration="16.332562315s" podCreationTimestamp="2025-11-26 07:13:24 +0000 UTC" firstStartedPulling="2025-11-26 07:13:25.714490156 +0000 UTC m=+1107.234631785" 
lastFinishedPulling="2025-11-26 07:13:38.927026428 +0000 UTC m=+1120.447168047" observedRunningTime="2025-11-26 07:13:40.330467189 +0000 UTC m=+1121.850608828" watchObservedRunningTime="2025-11-26 07:13:40.332562315 +0000 UTC m=+1121.852703934" Nov 26 07:13:40 crc kubenswrapper[4940]: I1126 07:13:40.649194 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-78r7g" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.331068 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"35af38ce835d55824412db931544d40f54c6a971946a5d4b50c5dfa394ce269c"} Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.331162 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"f7ed6711acf7fdec231f79586d9ef7609a087d63274a505334d0634157294d0a"} Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.331193 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"5c0f62d433b891f3245b23b16d6de813e3eab74a72c1c1978aa9aadf0b7c327d"} Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.331219 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"8d0b0bedcd7f34458be64dffa9614d1cddea6dd92857846272fc36941c4d41da"} Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.661598 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756002 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-additional-scripts\") pod \"e67022db-0c7d-4197-9deb-a5deabd14849\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756108 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-log-ovn\") pod \"e67022db-0c7d-4197-9deb-a5deabd14849\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756167 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9sgwc\" (UniqueName: \"kubernetes.io/projected/e67022db-0c7d-4197-9deb-a5deabd14849-kube-api-access-9sgwc\") pod \"e67022db-0c7d-4197-9deb-a5deabd14849\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756208 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run\") pod \"e67022db-0c7d-4197-9deb-a5deabd14849\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756283 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-scripts\") pod \"e67022db-0c7d-4197-9deb-a5deabd14849\" (UID: 
\"e67022db-0c7d-4197-9deb-a5deabd14849\") " Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756298 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run-ovn\") pod \"e67022db-0c7d-4197-9deb-a5deabd14849\" (UID: \"e67022db-0c7d-4197-9deb-a5deabd14849\") " Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756284 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e67022db-0c7d-4197-9deb-a5deabd14849" (UID: "e67022db-0c7d-4197-9deb-a5deabd14849"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756314 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run" (OuterVolumeSpecName: "var-run") pod "e67022db-0c7d-4197-9deb-a5deabd14849" (UID: "e67022db-0c7d-4197-9deb-a5deabd14849"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756432 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e67022db-0c7d-4197-9deb-a5deabd14849" (UID: "e67022db-0c7d-4197-9deb-a5deabd14849"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756942 4940 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756971 4940 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.756984 4940 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e67022db-0c7d-4197-9deb-a5deabd14849-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.757094 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e67022db-0c7d-4197-9deb-a5deabd14849" (UID: "e67022db-0c7d-4197-9deb-a5deabd14849"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.757603 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-scripts" (OuterVolumeSpecName: "scripts") pod "e67022db-0c7d-4197-9deb-a5deabd14849" (UID: "e67022db-0c7d-4197-9deb-a5deabd14849"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.762959 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e67022db-0c7d-4197-9deb-a5deabd14849-kube-api-access-9sgwc" (OuterVolumeSpecName: "kube-api-access-9sgwc") pod "e67022db-0c7d-4197-9deb-a5deabd14849" (UID: "e67022db-0c7d-4197-9deb-a5deabd14849"). InnerVolumeSpecName "kube-api-access-9sgwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.858342 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.858372 4940 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e67022db-0c7d-4197-9deb-a5deabd14849-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:41 crc kubenswrapper[4940]: I1126 07:13:41.858383 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9sgwc\" (UniqueName: \"kubernetes.io/projected/e67022db-0c7d-4197-9deb-a5deabd14849-kube-api-access-9sgwc\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.343558 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"ddacc922294d2d9560d232e885d82b0359325dab5663024167d5a82671b91dfe"} Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.345460 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g-config-l7krz" event={"ID":"e67022db-0c7d-4197-9deb-a5deabd14849","Type":"ContainerDied","Data":"a52e97700d3591f914afc1b1d6b9af918011f9934a1c8f458ee370fc2b3da2fb"} Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.345480 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a52e97700d3591f914afc1b1d6b9af918011f9934a1c8f458ee370fc2b3da2fb" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.345537 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-78r7g-config-l7krz" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.767060 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-78r7g-config-l7krz"] Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.773561 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-78r7g-config-l7krz"] Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.857182 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-78r7g-config-hrsh6"] Nov 26 07:13:42 crc kubenswrapper[4940]: E1126 07:13:42.857521 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e67022db-0c7d-4197-9deb-a5deabd14849" containerName="ovn-config" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.857536 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e67022db-0c7d-4197-9deb-a5deabd14849" containerName="ovn-config" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.857710 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e67022db-0c7d-4197-9deb-a5deabd14849" containerName="ovn-config" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.858222 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.860843 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.868231 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-78r7g-config-hrsh6"] Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.873540 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run-ovn\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.873571 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-scripts\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.873605 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mjgr\" (UniqueName: \"kubernetes.io/projected/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-kube-api-access-7mjgr\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.873643 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-log-ovn\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.873677 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" 
(UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-additional-scripts\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.873700 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.975092 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run-ovn\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.975145 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-scripts\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.975176 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mjgr\" (UniqueName: \"kubernetes.io/projected/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-kube-api-access-7mjgr\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.975218 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-log-ovn\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.975274 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-additional-scripts\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.975305 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.975741 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.975810 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" 
(UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run-ovn\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.977764 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-scripts\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.978129 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-log-ovn\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:42 crc kubenswrapper[4940]: I1126 07:13:42.978573 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-additional-scripts\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:43 crc kubenswrapper[4940]: I1126 07:13:43.010989 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mjgr\" (UniqueName: \"kubernetes.io/projected/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-kube-api-access-7mjgr\") pod \"ovn-controller-78r7g-config-hrsh6\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:43 crc kubenswrapper[4940]: I1126 07:13:43.175266 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e67022db-0c7d-4197-9deb-a5deabd14849" path="/var/lib/kubelet/pods/e67022db-0c7d-4197-9deb-a5deabd14849/volumes" Nov 26 07:13:43 crc kubenswrapper[4940]: I1126 07:13:43.179893 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:43 crc kubenswrapper[4940]: I1126 07:13:43.381861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"e58705bee99fbf1a356b726ecc7d48c7a1d44cee6e432d30db17de1a4b1bed0c"} Nov 26 07:13:43 crc kubenswrapper[4940]: I1126 07:13:43.381903 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"184283b7cf9ea6a22c37b80e59b65273e5c1e54072b94ba5e98ff402061ac3b7"} Nov 26 07:13:43 crc kubenswrapper[4940]: I1126 07:13:43.381914 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"a370f34c9d0093cf91dd550d53235e78ccb9de14c218c3ae695b5536b1207fa8"} Nov 26 07:13:43 crc kubenswrapper[4940]: W1126 07:13:43.701219 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8fa41107_9fb3_4edd_ab9f_57238cbb74ad.slice/crio-68dbd1c8f93552155f5f61b955490254ad220372260708c40b6e0565ccbf7b07 WatchSource:0}: Error finding container 68dbd1c8f93552155f5f61b955490254ad220372260708c40b6e0565ccbf7b07: Status 404 returned error can't find the container with id 68dbd1c8f93552155f5f61b955490254ad220372260708c40b6e0565ccbf7b07 Nov 26 07:13:43 crc kubenswrapper[4940]: I1126 07:13:43.712070 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-78r7g-config-hrsh6"] Nov 26 07:13:44 crc kubenswrapper[4940]: E1126 07:13:44.299786 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8fa41107_9fb3_4edd_ab9f_57238cbb74ad.slice/crio-conmon-5e79fd5689911f61381e13cf74ad508b7c002ca284841769b93df7d781c78f0b.scope\": RecentStats: unable to find data in memory cache]" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.395486 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"5968ea4146aabb9243cd7bc0fcedda38425122a8df83965feee0250ed0d15f33"} Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.395570 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"18fd11a465765ba259762706e393ce42274d6d5ab6b21c460bed17a0534150bb"} Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.395582 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerStarted","Data":"85ac1cbb7cd8cd7a99e39dcf3fc62fbf9041ad24323c288d9baf670d703ac447"} Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.398373 4940 generic.go:334] "Generic (PLEG): container finished" podID="8fa41107-9fb3-4edd-ab9f-57238cbb74ad" containerID="5e79fd5689911f61381e13cf74ad508b7c002ca284841769b93df7d781c78f0b" exitCode=0 Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.398418 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g-config-hrsh6" 
event={"ID":"8fa41107-9fb3-4edd-ab9f-57238cbb74ad","Type":"ContainerDied","Data":"5e79fd5689911f61381e13cf74ad508b7c002ca284841769b93df7d781c78f0b"} Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.398441 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g-config-hrsh6" event={"ID":"8fa41107-9fb3-4edd-ab9f-57238cbb74ad","Type":"ContainerStarted","Data":"68dbd1c8f93552155f5f61b955490254ad220372260708c40b6e0565ccbf7b07"} Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.440368 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=20.325995138 podStartE2EDuration="33.440342466s" podCreationTimestamp="2025-11-26 07:13:11 +0000 UTC" firstStartedPulling="2025-11-26 07:13:28.875374875 +0000 UTC m=+1110.395516494" lastFinishedPulling="2025-11-26 07:13:41.989722203 +0000 UTC m=+1123.509863822" observedRunningTime="2025-11-26 07:13:44.430199375 +0000 UTC m=+1125.950341014" watchObservedRunningTime="2025-11-26 07:13:44.440342466 +0000 UTC m=+1125.960484115" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.708074 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56766df65f-nw47c"] Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.709486 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.712173 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.723530 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56766df65f-nw47c"] Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.807730 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-sb\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.808211 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-svc\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.808236 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-config\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.808278 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr6jd\" (UniqueName: \"kubernetes.io/projected/0b859c0c-a380-4412-9f3b-6f7de522ad10-kube-api-access-jr6jd\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.808345 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-nb\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.808376 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-swift-storage-0\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.909317 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr6jd\" (UniqueName: \"kubernetes.io/projected/0b859c0c-a380-4412-9f3b-6f7de522ad10-kube-api-access-jr6jd\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.909420 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-nb\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.909458 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-swift-storage-0\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.909499 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-sb\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.909524 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-svc\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.909570 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-config\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.910595 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-sb\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.910633 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-swift-storage-0\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.910649 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-svc\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.910613 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-nb\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.910612 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-config\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:44 crc kubenswrapper[4940]: I1126 07:13:44.930360 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr6jd\" (UniqueName: \"kubernetes.io/projected/0b859c0c-a380-4412-9f3b-6f7de522ad10-kube-api-access-jr6jd\") pod \"dnsmasq-dns-56766df65f-nw47c\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.039955 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.524188 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56766df65f-nw47c"] Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.671667 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.753615 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827058 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-scripts\") pod \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827188 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-additional-scripts\") pod \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827290 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mjgr\" (UniqueName: \"kubernetes.io/projected/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-kube-api-access-7mjgr\") pod \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827322 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run-ovn\") pod \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827370 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run\") pod \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827427 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "8fa41107-9fb3-4edd-ab9f-57238cbb74ad" (UID: "8fa41107-9fb3-4edd-ab9f-57238cbb74ad"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827446 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-log-ovn\") pod \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\" (UID: \"8fa41107-9fb3-4edd-ab9f-57238cbb74ad\") " Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827476 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "8fa41107-9fb3-4edd-ab9f-57238cbb74ad" (UID: "8fa41107-9fb3-4edd-ab9f-57238cbb74ad"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.827501 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run" (OuterVolumeSpecName: "var-run") pod "8fa41107-9fb3-4edd-ab9f-57238cbb74ad" (UID: "8fa41107-9fb3-4edd-ab9f-57238cbb74ad"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.828131 4940 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.828151 4940 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.828165 4940 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.828201 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "8fa41107-9fb3-4edd-ab9f-57238cbb74ad" (UID: "8fa41107-9fb3-4edd-ab9f-57238cbb74ad"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.828526 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-scripts" (OuterVolumeSpecName: "scripts") pod "8fa41107-9fb3-4edd-ab9f-57238cbb74ad" (UID: "8fa41107-9fb3-4edd-ab9f-57238cbb74ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.831786 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-kube-api-access-7mjgr" (OuterVolumeSpecName: "kube-api-access-7mjgr") pod "8fa41107-9fb3-4edd-ab9f-57238cbb74ad" (UID: "8fa41107-9fb3-4edd-ab9f-57238cbb74ad"). InnerVolumeSpecName "kube-api-access-7mjgr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.929424 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mjgr\" (UniqueName: \"kubernetes.io/projected/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-kube-api-access-7mjgr\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.929469 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:45 crc kubenswrapper[4940]: I1126 07:13:45.929482 4940 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8fa41107-9fb3-4edd-ab9f-57238cbb74ad-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.109865 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.417031 4940 generic.go:334] "Generic (PLEG): container finished" podID="0c3c0935-a87e-4709-a468-f00d5e8b3ee1" containerID="ac0e35b82f07eb7f9962a21908841a4ad2dc0d4537b49b3d218f50d447c56cc2" exitCode=0 Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.417108 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nv2vz" event={"ID":"0c3c0935-a87e-4709-a468-f00d5e8b3ee1","Type":"ContainerDied","Data":"ac0e35b82f07eb7f9962a21908841a4ad2dc0d4537b49b3d218f50d447c56cc2"} Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.419214 4940 generic.go:334] "Generic (PLEG): container finished" podID="0b859c0c-a380-4412-9f3b-6f7de522ad10" containerID="f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798" exitCode=0 Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.419277 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56766df65f-nw47c" event={"ID":"0b859c0c-a380-4412-9f3b-6f7de522ad10","Type":"ContainerDied","Data":"f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798"} Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.419300 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56766df65f-nw47c" event={"ID":"0b859c0c-a380-4412-9f3b-6f7de522ad10","Type":"ContainerStarted","Data":"58e02585612c1b8ff6100e772720c2da4295cb386e51146ec46c80636d109255"} Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.421822 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g-config-hrsh6" event={"ID":"8fa41107-9fb3-4edd-ab9f-57238cbb74ad","Type":"ContainerDied","Data":"68dbd1c8f93552155f5f61b955490254ad220372260708c40b6e0565ccbf7b07"} Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.421859 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68dbd1c8f93552155f5f61b955490254ad220372260708c40b6e0565ccbf7b07" Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.421915 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-78r7g-config-hrsh6" Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.750287 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-78r7g-config-hrsh6"] Nov 26 07:13:46 crc kubenswrapper[4940]: I1126 07:13:46.763929 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-78r7g-config-hrsh6"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.175309 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fa41107-9fb3-4edd-ab9f-57238cbb74ad" path="/var/lib/kubelet/pods/8fa41107-9fb3-4edd-ab9f-57238cbb74ad/volumes" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.431305 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56766df65f-nw47c" event={"ID":"0b859c0c-a380-4412-9f3b-6f7de522ad10","Type":"ContainerStarted","Data":"771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2"} Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.464320 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56766df65f-nw47c" podStartSLOduration=3.464297932 podStartE2EDuration="3.464297932s" podCreationTimestamp="2025-11-26 07:13:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:47.456852945 +0000 UTC m=+1128.976994574" watchObservedRunningTime="2025-11-26 07:13:47.464297932 +0000 UTC m=+1128.984439541" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.539423 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-sdhbb"] Nov 26 07:13:47 crc kubenswrapper[4940]: E1126 07:13:47.539771 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fa41107-9fb3-4edd-ab9f-57238cbb74ad" containerName="ovn-config" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.539788 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fa41107-9fb3-4edd-ab9f-57238cbb74ad" containerName="ovn-config" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.539968 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fa41107-9fb3-4edd-ab9f-57238cbb74ad" containerName="ovn-config" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.540471 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.549306 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sdhbb"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.652403 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-7qg26"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.653884 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.655017 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33462c2c-29fb-487a-8f23-db40ba07be25-operator-scripts\") pod \"cinder-db-create-sdhbb\" (UID: \"33462c2c-29fb-487a-8f23-db40ba07be25\") " pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.655109 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwbkz\" (UniqueName: \"kubernetes.io/projected/33462c2c-29fb-487a-8f23-db40ba07be25-kube-api-access-xwbkz\") pod \"cinder-db-create-sdhbb\" (UID: \"33462c2c-29fb-487a-8f23-db40ba07be25\") " pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.671976 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-7f7c-account-create-update-j9m8h"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.676622 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.679119 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.694937 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-7qg26"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.701964 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-7f7c-account-create-update-j9m8h"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.756890 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwbkz\" (UniqueName: \"kubernetes.io/projected/33462c2c-29fb-487a-8f23-db40ba07be25-kube-api-access-xwbkz\") pod \"cinder-db-create-sdhbb\" (UID: \"33462c2c-29fb-487a-8f23-db40ba07be25\") " pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.757056 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbt47\" (UniqueName: \"kubernetes.io/projected/326ec868-d611-435e-9dd2-769dc279c6c5-kube-api-access-bbt47\") pod \"cinder-7f7c-account-create-update-j9m8h\" (UID: \"326ec868-d611-435e-9dd2-769dc279c6c5\") " pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.757083 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7zw9\" (UniqueName: \"kubernetes.io/projected/8cb1ded9-17a3-40e3-955b-11e63806cd6f-kube-api-access-v7zw9\") pod \"barbican-db-create-7qg26\" (UID: \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\") " pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.757176 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/326ec868-d611-435e-9dd2-769dc279c6c5-operator-scripts\") pod \"cinder-7f7c-account-create-update-j9m8h\" (UID: \"326ec868-d611-435e-9dd2-769dc279c6c5\") " pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.757272 4940 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33462c2c-29fb-487a-8f23-db40ba07be25-operator-scripts\") pod \"cinder-db-create-sdhbb\" (UID: \"33462c2c-29fb-487a-8f23-db40ba07be25\") " pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.757319 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cb1ded9-17a3-40e3-955b-11e63806cd6f-operator-scripts\") pod \"barbican-db-create-7qg26\" (UID: \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\") " pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.758712 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33462c2c-29fb-487a-8f23-db40ba07be25-operator-scripts\") pod \"cinder-db-create-sdhbb\" (UID: \"33462c2c-29fb-487a-8f23-db40ba07be25\") " pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.760707 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-2dca-account-create-update-z9bbp"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.765292 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.770351 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.784109 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2dca-account-create-update-z9bbp"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.812460 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwbkz\" (UniqueName: \"kubernetes.io/projected/33462c2c-29fb-487a-8f23-db40ba07be25-kube-api-access-xwbkz\") pod \"cinder-db-create-sdhbb\" (UID: \"33462c2c-29fb-487a-8f23-db40ba07be25\") " pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.860834 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.861455 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-zzrpz"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.861451 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cb1ded9-17a3-40e3-955b-11e63806cd6f-operator-scripts\") pod \"barbican-db-create-7qg26\" (UID: \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\") " pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.861665 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-operator-scripts\") pod \"barbican-2dca-account-create-update-z9bbp\" (UID: \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\") " pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.861697 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jd4d\" (UniqueName: \"kubernetes.io/projected/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-kube-api-access-9jd4d\") pod \"barbican-2dca-account-create-update-z9bbp\" (UID: \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\") " pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.861817 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbt47\" (UniqueName: \"kubernetes.io/projected/326ec868-d611-435e-9dd2-769dc279c6c5-kube-api-access-bbt47\") pod \"cinder-7f7c-account-create-update-j9m8h\" (UID: \"326ec868-d611-435e-9dd2-769dc279c6c5\") " pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.861843 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7zw9\" (UniqueName: \"kubernetes.io/projected/8cb1ded9-17a3-40e3-955b-11e63806cd6f-kube-api-access-v7zw9\") pod \"barbican-db-create-7qg26\" (UID: \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\") " pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.861878 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/326ec868-d611-435e-9dd2-769dc279c6c5-operator-scripts\") pod \"cinder-7f7c-account-create-update-j9m8h\" (UID: \"326ec868-d611-435e-9dd2-769dc279c6c5\") " pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.862452 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.862677 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/326ec868-d611-435e-9dd2-769dc279c6c5-operator-scripts\") pod \"cinder-7f7c-account-create-update-j9m8h\" (UID: \"326ec868-d611-435e-9dd2-769dc279c6c5\") " pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.863427 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cb1ded9-17a3-40e3-955b-11e63806cd6f-operator-scripts\") pod \"barbican-db-create-7qg26\" (UID: \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\") " pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.870318 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-zzrpz"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.890927 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbt47\" (UniqueName: \"kubernetes.io/projected/326ec868-d611-435e-9dd2-769dc279c6c5-kube-api-access-bbt47\") pod \"cinder-7f7c-account-create-update-j9m8h\" (UID: \"326ec868-d611-435e-9dd2-769dc279c6c5\") " pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.893518 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7zw9\" (UniqueName: \"kubernetes.io/projected/8cb1ded9-17a3-40e3-955b-11e63806cd6f-kube-api-access-v7zw9\") pod \"barbican-db-create-7qg26\" (UID: \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\") " pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.952982 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-mkt8h"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.954423 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.956686 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.957141 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.959110 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.959268 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tgffk" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.962494 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-2ea3-account-create-update-f9hmh"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.963157 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-operator-scripts\") pod \"barbican-2dca-account-create-update-z9bbp\" (UID: \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\") " pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.963190 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jd4d\" (UniqueName: \"kubernetes.io/projected/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-kube-api-access-9jd4d\") pod \"barbican-2dca-account-create-update-z9bbp\" (UID: \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\") " pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.963223 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f867029a-5e81-436e-82ed-d8c3cef5b734-operator-scripts\") pod \"neutron-db-create-zzrpz\" (UID: \"f867029a-5e81-436e-82ed-d8c3cef5b734\") " pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.963241 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlqws\" (UniqueName: \"kubernetes.io/projected/f867029a-5e81-436e-82ed-d8c3cef5b734-kube-api-access-wlqws\") pod \"neutron-db-create-zzrpz\" (UID: \"f867029a-5e81-436e-82ed-d8c3cef5b734\") " pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.963825 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-operator-scripts\") pod \"barbican-2dca-account-create-update-z9bbp\" (UID: \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\") " pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.963843 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.965440 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.975401 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.984281 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-2ea3-account-create-update-f9hmh"] Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.985857 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jd4d\" (UniqueName: \"kubernetes.io/projected/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-kube-api-access-9jd4d\") pod \"barbican-2dca-account-create-update-z9bbp\" (UID: \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\") " pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:47 crc kubenswrapper[4940]: I1126 07:13:47.991228 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-mkt8h"] Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.000619 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.063879 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw8rg\" (UniqueName: \"kubernetes.io/projected/0d22c5e6-6595-4926-9525-cc5e90134b3c-kube-api-access-bw8rg\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.063951 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-operator-scripts\") pod \"neutron-2ea3-account-create-update-f9hmh\" (UID: \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\") " pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.063975 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f867029a-5e81-436e-82ed-d8c3cef5b734-operator-scripts\") pod \"neutron-db-create-zzrpz\" (UID: \"f867029a-5e81-436e-82ed-d8c3cef5b734\") " pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.063997 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlqws\" (UniqueName: \"kubernetes.io/projected/f867029a-5e81-436e-82ed-d8c3cef5b734-kube-api-access-wlqws\") pod \"neutron-db-create-zzrpz\" (UID: \"f867029a-5e81-436e-82ed-d8c3cef5b734\") " pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.064027 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-combined-ca-bundle\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.064066 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvwcr\" (UniqueName: \"kubernetes.io/projected/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-kube-api-access-qvwcr\") pod \"neutron-2ea3-account-create-update-f9hmh\" (UID: \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\") " pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.064093 4940 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-config-data\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.064667 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f867029a-5e81-436e-82ed-d8c3cef5b734-operator-scripts\") pod \"neutron-db-create-zzrpz\" (UID: \"f867029a-5e81-436e-82ed-d8c3cef5b734\") " pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.082128 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlqws\" (UniqueName: \"kubernetes.io/projected/f867029a-5e81-436e-82ed-d8c3cef5b734-kube-api-access-wlqws\") pod \"neutron-db-create-zzrpz\" (UID: \"f867029a-5e81-436e-82ed-d8c3cef5b734\") " pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.086048 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.093013 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.172006 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-operator-scripts\") pod \"neutron-2ea3-account-create-update-f9hmh\" (UID: \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\") " pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.172746 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-combined-ca-bundle\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.172796 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvwcr\" (UniqueName: \"kubernetes.io/projected/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-kube-api-access-qvwcr\") pod \"neutron-2ea3-account-create-update-f9hmh\" (UID: \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\") " pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.172848 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-config-data\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.172949 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw8rg\" (UniqueName: \"kubernetes.io/projected/0d22c5e6-6595-4926-9525-cc5e90134b3c-kube-api-access-bw8rg\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.174169 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-operator-scripts\") pod \"neutron-2ea3-account-create-update-f9hmh\" (UID: \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\") " pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.189012 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-combined-ca-bundle\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.189559 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-config-data\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.200052 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw8rg\" (UniqueName: \"kubernetes.io/projected/0d22c5e6-6595-4926-9525-cc5e90134b3c-kube-api-access-bw8rg\") pod \"keystone-db-sync-mkt8h\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.207961 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvwcr\" (UniqueName: \"kubernetes.io/projected/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-kube-api-access-qvwcr\") pod \"neutron-2ea3-account-create-update-f9hmh\" (UID: \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\") " pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.273571 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-combined-ca-bundle\") pod \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.273627 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-config-data\") pod \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.273738 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97ljg\" (UniqueName: \"kubernetes.io/projected/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-kube-api-access-97ljg\") pod \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.273803 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-db-sync-config-data\") pod \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\" (UID: \"0c3c0935-a87e-4709-a468-f00d5e8b3ee1\") " Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.276851 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.279452 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-kube-api-access-97ljg" (OuterVolumeSpecName: "kube-api-access-97ljg") pod "0c3c0935-a87e-4709-a468-f00d5e8b3ee1" (UID: "0c3c0935-a87e-4709-a468-f00d5e8b3ee1"). InnerVolumeSpecName "kube-api-access-97ljg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.286011 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0c3c0935-a87e-4709-a468-f00d5e8b3ee1" (UID: "0c3c0935-a87e-4709-a468-f00d5e8b3ee1"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.301447 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c3c0935-a87e-4709-a468-f00d5e8b3ee1" (UID: "0c3c0935-a87e-4709-a468-f00d5e8b3ee1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.327426 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-config-data" (OuterVolumeSpecName: "config-data") pod "0c3c0935-a87e-4709-a468-f00d5e8b3ee1" (UID: "0c3c0935-a87e-4709-a468-f00d5e8b3ee1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.387590 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.402125 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.402733 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.402755 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.402764 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97ljg\" (UniqueName: \"kubernetes.io/projected/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-kube-api-access-97ljg\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.402774 4940 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0c3c0935-a87e-4709-a468-f00d5e8b3ee1-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.464824 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-nv2vz" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.465473 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-nv2vz" event={"ID":"0c3c0935-a87e-4709-a468-f00d5e8b3ee1","Type":"ContainerDied","Data":"24ccb2e4b7d32ad415e8ad95e2af98d638e2dc182fd6abd002050f265ec1022b"} Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.465510 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24ccb2e4b7d32ad415e8ad95e2af98d638e2dc182fd6abd002050f265ec1022b" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.465531 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.503313 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sdhbb"] Nov 26 07:13:48 crc kubenswrapper[4940]: W1126 07:13:48.526208 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33462c2c_29fb_487a_8f23_db40ba07be25.slice/crio-654b0849d55a0e0e502e985085ff2b9981188f3c470c14a7bc789ab795702422 WatchSource:0}: Error finding container 654b0849d55a0e0e502e985085ff2b9981188f3c470c14a7bc789ab795702422: Status 404 returned error can't find the container with id 654b0849d55a0e0e502e985085ff2b9981188f3c470c14a7bc789ab795702422 Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.564857 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-7qg26"] Nov 26 07:13:48 crc kubenswrapper[4940]: W1126 07:13:48.572605 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8cb1ded9_17a3_40e3_955b_11e63806cd6f.slice/crio-8a319dfc8c7773fda8bae320292d0117b62f5b5cd7ae7f74c869f61a49075b2f WatchSource:0}: Error finding container 8a319dfc8c7773fda8bae320292d0117b62f5b5cd7ae7f74c869f61a49075b2f: Status 404 returned error can't find the container with id 8a319dfc8c7773fda8bae320292d0117b62f5b5cd7ae7f74c869f61a49075b2f Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.679461 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2dca-account-create-update-z9bbp"] Nov 26 07:13:48 crc kubenswrapper[4940]: I1126 07:13:48.684224 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-7f7c-account-create-update-j9m8h"] Nov 26 07:13:48 crc kubenswrapper[4940]: W1126 07:13:48.688713 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod326ec868_d611_435e_9dd2_769dc279c6c5.slice/crio-bb9908de18183171b76dcabd5216b202a878f213d0821fdd4c7a08e004d116a9 WatchSource:0}: Error finding container bb9908de18183171b76dcabd5216b202a878f213d0821fdd4c7a08e004d116a9: Status 404 returned error can't find the container with id bb9908de18183171b76dcabd5216b202a878f213d0821fdd4c7a08e004d116a9 Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:48.925743 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56766df65f-nw47c"] Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:48.962024 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6856c564b9-5c8k6"] Nov 26 07:13:49 crc kubenswrapper[4940]: E1126 07:13:48.962471 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c3c0935-a87e-4709-a468-f00d5e8b3ee1" 
containerName="glance-db-sync" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:48.962484 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c3c0935-a87e-4709-a468-f00d5e8b3ee1" containerName="glance-db-sync" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:48.962666 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c3c0935-a87e-4709-a468-f00d5e8b3ee1" containerName="glance-db-sync" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:48.963609 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:48.990960 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-zzrpz"] Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.029462 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6856c564b9-5c8k6"] Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.119449 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-nb\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.119646 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-swift-storage-0\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.119673 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-svc\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.119724 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-config\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.119787 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bkxl\" (UniqueName: \"kubernetes.io/projected/1c6385f9-fbe8-4b99-be0a-ed858327c085-kube-api-access-7bkxl\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.119827 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-sb\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.221864 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-swift-storage-0\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.221918 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-svc\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.222017 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-config\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.222099 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bkxl\" (UniqueName: \"kubernetes.io/projected/1c6385f9-fbe8-4b99-be0a-ed858327c085-kube-api-access-7bkxl\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.222148 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-sb\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.222226 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-nb\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.222897 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-swift-storage-0\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.222914 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-svc\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.223173 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-nb\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.223579 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-config\") pod 
\"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.223897 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-sb\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.276403 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bkxl\" (UniqueName: \"kubernetes.io/projected/1c6385f9-fbe8-4b99-be0a-ed858327c085-kube-api-access-7bkxl\") pod \"dnsmasq-dns-6856c564b9-5c8k6\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.360501 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.478451 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7f7c-account-create-update-j9m8h" event={"ID":"326ec868-d611-435e-9dd2-769dc279c6c5","Type":"ContainerStarted","Data":"d2e2b5615ecd67dfc4f4c70f5e276eed17c63b8bfbb7e591c55bc593760db45c"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.478901 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7f7c-account-create-update-j9m8h" event={"ID":"326ec868-d611-435e-9dd2-769dc279c6c5","Type":"ContainerStarted","Data":"bb9908de18183171b76dcabd5216b202a878f213d0821fdd4c7a08e004d116a9"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.480240 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-zzrpz" event={"ID":"f867029a-5e81-436e-82ed-d8c3cef5b734","Type":"ContainerStarted","Data":"ed816dacf55ec0bc4c721ede36d0a5e84c165a7151e932b5a7869aa986b4649d"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.480274 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-zzrpz" event={"ID":"f867029a-5e81-436e-82ed-d8c3cef5b734","Type":"ContainerStarted","Data":"71e529627ce8ca061c5a64e6b2f3840f80bec4a80b728d4729edc9b7f9c83499"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.483915 4940 generic.go:334] "Generic (PLEG): container finished" podID="8cb1ded9-17a3-40e3-955b-11e63806cd6f" containerID="b2079e20c2f7eb5ab7bfd36841e069cbf0408264fccf23ef33aac1c97d13b0b7" exitCode=0 Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.484026 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7qg26" event={"ID":"8cb1ded9-17a3-40e3-955b-11e63806cd6f","Type":"ContainerDied","Data":"b2079e20c2f7eb5ab7bfd36841e069cbf0408264fccf23ef33aac1c97d13b0b7"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.484060 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7qg26" event={"ID":"8cb1ded9-17a3-40e3-955b-11e63806cd6f","Type":"ContainerStarted","Data":"8a319dfc8c7773fda8bae320292d0117b62f5b5cd7ae7f74c869f61a49075b2f"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.487117 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2dca-account-create-update-z9bbp" 
event={"ID":"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c","Type":"ContainerStarted","Data":"5d90b332961fb439346d759e5988a9dafd760f766c76d53447f03e456e68a27f"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.487139 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2dca-account-create-update-z9bbp" event={"ID":"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c","Type":"ContainerStarted","Data":"36b9f85a03cfd50ee03547bde1fbd44708c8b18807bff159955e75e345fcb347"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.489033 4940 generic.go:334] "Generic (PLEG): container finished" podID="33462c2c-29fb-487a-8f23-db40ba07be25" containerID="6b020022938f10660afac714fd60b211b8b08756c8097b70ff711d6b7d8686d0" exitCode=0 Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.489334 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sdhbb" event={"ID":"33462c2c-29fb-487a-8f23-db40ba07be25","Type":"ContainerDied","Data":"6b020022938f10660afac714fd60b211b8b08756c8097b70ff711d6b7d8686d0"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.489381 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sdhbb" event={"ID":"33462c2c-29fb-487a-8f23-db40ba07be25","Type":"ContainerStarted","Data":"654b0849d55a0e0e502e985085ff2b9981188f3c470c14a7bc789ab795702422"} Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.496987 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-7f7c-account-create-update-j9m8h" podStartSLOduration=2.496966629 podStartE2EDuration="2.496966629s" podCreationTimestamp="2025-11-26 07:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:49.491434904 +0000 UTC m=+1131.011576523" watchObservedRunningTime="2025-11-26 07:13:49.496966629 +0000 UTC m=+1131.017108248" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.510211 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-2dca-account-create-update-z9bbp" podStartSLOduration=2.510179548 podStartE2EDuration="2.510179548s" podCreationTimestamp="2025-11-26 07:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:49.506217412 +0000 UTC m=+1131.026359031" watchObservedRunningTime="2025-11-26 07:13:49.510179548 +0000 UTC m=+1131.030321187" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.548933 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-zzrpz" podStartSLOduration=2.5489093560000002 podStartE2EDuration="2.548909356s" podCreationTimestamp="2025-11-26 07:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:49.540553801 +0000 UTC m=+1131.060695420" watchObservedRunningTime="2025-11-26 07:13:49.548909356 +0000 UTC m=+1131.069050975" Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.813979 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-mkt8h"] Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.853424 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-2ea3-account-create-update-f9hmh"] Nov 26 07:13:49 crc kubenswrapper[4940]: I1126 07:13:49.982093 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-6856c564b9-5c8k6"] Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.500814 4940 generic.go:334] "Generic (PLEG): container finished" podID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerID="8fd2f9b1a0537af60c43b1fc9f7d253646e9f4a6e0f1379d2ec9fe17df7506cf" exitCode=0 Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.500985 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" event={"ID":"1c6385f9-fbe8-4b99-be0a-ed858327c085","Type":"ContainerDied","Data":"8fd2f9b1a0537af60c43b1fc9f7d253646e9f4a6e0f1379d2ec9fe17df7506cf"} Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.501380 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" event={"ID":"1c6385f9-fbe8-4b99-be0a-ed858327c085","Type":"ContainerStarted","Data":"f78215ca99f0332c05da3e7012d2e2d387568a91d53f393ce4f6d88c7b721949"} Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.505115 4940 generic.go:334] "Generic (PLEG): container finished" podID="7ad2bcd9-0999-4e06-84d1-aed4c51e4edd" containerID="544aae1a724f7ff43db122d3db2ab2e7c44082064a7f75e5a466aae86843c5f8" exitCode=0 Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.505280 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2ea3-account-create-update-f9hmh" event={"ID":"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd","Type":"ContainerDied","Data":"544aae1a724f7ff43db122d3db2ab2e7c44082064a7f75e5a466aae86843c5f8"} Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.505313 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2ea3-account-create-update-f9hmh" event={"ID":"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd","Type":"ContainerStarted","Data":"bf985bc4c29379ee455d64d7c08ef2fe4d7521c9d361067b7fec5c72ed1ea73c"} Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.506674 4940 generic.go:334] "Generic (PLEG): container finished" podID="cf5afc28-5f57-4d6f-97de-0bac57eb7a1c" containerID="5d90b332961fb439346d759e5988a9dafd760f766c76d53447f03e456e68a27f" exitCode=0 Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.506740 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2dca-account-create-update-z9bbp" event={"ID":"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c","Type":"ContainerDied","Data":"5d90b332961fb439346d759e5988a9dafd760f766c76d53447f03e456e68a27f"} Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.508808 4940 generic.go:334] "Generic (PLEG): container finished" podID="326ec868-d611-435e-9dd2-769dc279c6c5" containerID="d2e2b5615ecd67dfc4f4c70f5e276eed17c63b8bfbb7e591c55bc593760db45c" exitCode=0 Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.508962 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7f7c-account-create-update-j9m8h" event={"ID":"326ec868-d611-435e-9dd2-769dc279c6c5","Type":"ContainerDied","Data":"d2e2b5615ecd67dfc4f4c70f5e276eed17c63b8bfbb7e591c55bc593760db45c"} Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.512864 4940 generic.go:334] "Generic (PLEG): container finished" podID="f867029a-5e81-436e-82ed-d8c3cef5b734" containerID="ed816dacf55ec0bc4c721ede36d0a5e84c165a7151e932b5a7869aa986b4649d" exitCode=0 Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.513016 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-zzrpz" 
event={"ID":"f867029a-5e81-436e-82ed-d8c3cef5b734","Type":"ContainerDied","Data":"ed816dacf55ec0bc4c721ede36d0a5e84c165a7151e932b5a7869aa986b4649d"} Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.515450 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mkt8h" event={"ID":"0d22c5e6-6595-4926-9525-cc5e90134b3c","Type":"ContainerStarted","Data":"f4a85f49f74aa1d18876e560fbfba0e52b65ffa06409638560f5ad900ab5fa73"} Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.515782 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56766df65f-nw47c" podUID="0b859c0c-a380-4412-9f3b-6f7de522ad10" containerName="dnsmasq-dns" containerID="cri-o://771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2" gracePeriod=10 Nov 26 07:13:50 crc kubenswrapper[4940]: I1126 07:13:50.927853 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.052373 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.055785 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwbkz\" (UniqueName: \"kubernetes.io/projected/33462c2c-29fb-487a-8f23-db40ba07be25-kube-api-access-xwbkz\") pod \"33462c2c-29fb-487a-8f23-db40ba07be25\" (UID: \"33462c2c-29fb-487a-8f23-db40ba07be25\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.055925 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33462c2c-29fb-487a-8f23-db40ba07be25-operator-scripts\") pod \"33462c2c-29fb-487a-8f23-db40ba07be25\" (UID: \"33462c2c-29fb-487a-8f23-db40ba07be25\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.057672 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33462c2c-29fb-487a-8f23-db40ba07be25-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "33462c2c-29fb-487a-8f23-db40ba07be25" (UID: "33462c2c-29fb-487a-8f23-db40ba07be25"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.061698 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.067670 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33462c2c-29fb-487a-8f23-db40ba07be25-kube-api-access-xwbkz" (OuterVolumeSpecName: "kube-api-access-xwbkz") pod "33462c2c-29fb-487a-8f23-db40ba07be25" (UID: "33462c2c-29fb-487a-8f23-db40ba07be25"). InnerVolumeSpecName "kube-api-access-xwbkz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.157408 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cb1ded9-17a3-40e3-955b-11e63806cd6f-operator-scripts\") pod \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\" (UID: \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.157506 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7zw9\" (UniqueName: \"kubernetes.io/projected/8cb1ded9-17a3-40e3-955b-11e63806cd6f-kube-api-access-v7zw9\") pod \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\" (UID: \"8cb1ded9-17a3-40e3-955b-11e63806cd6f\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.157953 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33462c2c-29fb-487a-8f23-db40ba07be25-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.157978 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwbkz\" (UniqueName: \"kubernetes.io/projected/33462c2c-29fb-487a-8f23-db40ba07be25-kube-api-access-xwbkz\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.159093 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cb1ded9-17a3-40e3-955b-11e63806cd6f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8cb1ded9-17a3-40e3-955b-11e63806cd6f" (UID: "8cb1ded9-17a3-40e3-955b-11e63806cd6f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.162108 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cb1ded9-17a3-40e3-955b-11e63806cd6f-kube-api-access-v7zw9" (OuterVolumeSpecName: "kube-api-access-v7zw9") pod "8cb1ded9-17a3-40e3-955b-11e63806cd6f" (UID: "8cb1ded9-17a3-40e3-955b-11e63806cd6f"). InnerVolumeSpecName "kube-api-access-v7zw9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.258841 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-nb\") pod \"0b859c0c-a380-4412-9f3b-6f7de522ad10\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.258950 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jr6jd\" (UniqueName: \"kubernetes.io/projected/0b859c0c-a380-4412-9f3b-6f7de522ad10-kube-api-access-jr6jd\") pod \"0b859c0c-a380-4412-9f3b-6f7de522ad10\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.259055 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-sb\") pod \"0b859c0c-a380-4412-9f3b-6f7de522ad10\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.259113 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-config\") pod \"0b859c0c-a380-4412-9f3b-6f7de522ad10\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.259150 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-svc\") pod \"0b859c0c-a380-4412-9f3b-6f7de522ad10\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.259206 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-swift-storage-0\") pod \"0b859c0c-a380-4412-9f3b-6f7de522ad10\" (UID: \"0b859c0c-a380-4412-9f3b-6f7de522ad10\") " Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.259999 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cb1ded9-17a3-40e3-955b-11e63806cd6f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.260116 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7zw9\" (UniqueName: \"kubernetes.io/projected/8cb1ded9-17a3-40e3-955b-11e63806cd6f-kube-api-access-v7zw9\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.266576 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b859c0c-a380-4412-9f3b-6f7de522ad10-kube-api-access-jr6jd" (OuterVolumeSpecName: "kube-api-access-jr6jd") pod "0b859c0c-a380-4412-9f3b-6f7de522ad10" (UID: "0b859c0c-a380-4412-9f3b-6f7de522ad10"). InnerVolumeSpecName "kube-api-access-jr6jd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.298564 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0b859c0c-a380-4412-9f3b-6f7de522ad10" (UID: "0b859c0c-a380-4412-9f3b-6f7de522ad10"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.299775 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0b859c0c-a380-4412-9f3b-6f7de522ad10" (UID: "0b859c0c-a380-4412-9f3b-6f7de522ad10"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.302797 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0b859c0c-a380-4412-9f3b-6f7de522ad10" (UID: "0b859c0c-a380-4412-9f3b-6f7de522ad10"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.307965 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0b859c0c-a380-4412-9f3b-6f7de522ad10" (UID: "0b859c0c-a380-4412-9f3b-6f7de522ad10"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.311433 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-config" (OuterVolumeSpecName: "config") pod "0b859c0c-a380-4412-9f3b-6f7de522ad10" (UID: "0b859c0c-a380-4412-9f3b-6f7de522ad10"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.362129 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.362169 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.362179 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.362191 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.362200 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jr6jd\" (UniqueName: \"kubernetes.io/projected/0b859c0c-a380-4412-9f3b-6f7de522ad10-kube-api-access-jr6jd\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.362210 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0b859c0c-a380-4412-9f3b-6f7de522ad10-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.530943 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-db-create-sdhbb" event={"ID":"33462c2c-29fb-487a-8f23-db40ba07be25","Type":"ContainerDied","Data":"654b0849d55a0e0e502e985085ff2b9981188f3c470c14a7bc789ab795702422"} Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.531301 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="654b0849d55a0e0e502e985085ff2b9981188f3c470c14a7bc789ab795702422" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.531011 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sdhbb" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.533155 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-7qg26" event={"ID":"8cb1ded9-17a3-40e3-955b-11e63806cd6f","Type":"ContainerDied","Data":"8a319dfc8c7773fda8bae320292d0117b62f5b5cd7ae7f74c869f61a49075b2f"} Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.533179 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-7qg26" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.533192 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a319dfc8c7773fda8bae320292d0117b62f5b5cd7ae7f74c869f61a49075b2f" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.535354 4940 generic.go:334] "Generic (PLEG): container finished" podID="0b859c0c-a380-4412-9f3b-6f7de522ad10" containerID="771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2" exitCode=0 Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.535398 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56766df65f-nw47c" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.535477 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56766df65f-nw47c" event={"ID":"0b859c0c-a380-4412-9f3b-6f7de522ad10","Type":"ContainerDied","Data":"771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2"} Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.535538 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56766df65f-nw47c" event={"ID":"0b859c0c-a380-4412-9f3b-6f7de522ad10","Type":"ContainerDied","Data":"58e02585612c1b8ff6100e772720c2da4295cb386e51146ec46c80636d109255"} Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.535562 4940 scope.go:117] "RemoveContainer" containerID="771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.545578 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" event={"ID":"1c6385f9-fbe8-4b99-be0a-ed858327c085","Type":"ContainerStarted","Data":"0c1c686c9ef7059d8ab567ae5883d0248649e6a929c20f2da67127f20b09b2d0"} Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.545630 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.564808 4940 scope.go:117] "RemoveContainer" containerID="f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.582694 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" podStartSLOduration=3.582672358 podStartE2EDuration="3.582672358s" podCreationTimestamp="2025-11-26 07:13:48 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:13:51.568338584 +0000 UTC m=+1133.088480213" watchObservedRunningTime="2025-11-26 07:13:51.582672358 +0000 UTC m=+1133.102813977" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.593644 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56766df65f-nw47c"] Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.602269 4940 scope.go:117] "RemoveContainer" containerID="771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.602988 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56766df65f-nw47c"] Nov 26 07:13:51 crc kubenswrapper[4940]: E1126 07:13:51.604345 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2\": container with ID starting with 771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2 not found: ID does not exist" containerID="771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.604570 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2"} err="failed to get container status \"771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2\": rpc error: code = NotFound desc = could not find container \"771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2\": container with ID starting with 771b87b1dc0468af13a007f4dac73e8accd74aa2c420bc25d239bf888448f3b2 not found: ID does not exist" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.604650 4940 scope.go:117] "RemoveContainer" containerID="f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798" Nov 26 07:13:51 crc kubenswrapper[4940]: E1126 07:13:51.607245 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798\": container with ID starting with f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798 not found: ID does not exist" containerID="f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.607290 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798"} err="failed to get container status \"f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798\": rpc error: code = NotFound desc = could not find container \"f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798\": container with ID starting with f685b9b939f43d7c9abc61a62a04c7b3c5ee646949ceca848b232e227a17c798 not found: ID does not exist" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.728883 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.728936 4940 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:13:51 crc kubenswrapper[4940]: I1126 07:13:51.938812 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.031371 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.035636 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.048213 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.072759 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/326ec868-d611-435e-9dd2-769dc279c6c5-operator-scripts\") pod \"326ec868-d611-435e-9dd2-769dc279c6c5\" (UID: \"326ec868-d611-435e-9dd2-769dc279c6c5\") " Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.074451 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbt47\" (UniqueName: \"kubernetes.io/projected/326ec868-d611-435e-9dd2-769dc279c6c5-kube-api-access-bbt47\") pod \"326ec868-d611-435e-9dd2-769dc279c6c5\" (UID: \"326ec868-d611-435e-9dd2-769dc279c6c5\") " Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.075599 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/326ec868-d611-435e-9dd2-769dc279c6c5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "326ec868-d611-435e-9dd2-769dc279c6c5" (UID: "326ec868-d611-435e-9dd2-769dc279c6c5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.081532 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/326ec868-d611-435e-9dd2-769dc279c6c5-kube-api-access-bbt47" (OuterVolumeSpecName: "kube-api-access-bbt47") pod "326ec868-d611-435e-9dd2-769dc279c6c5" (UID: "326ec868-d611-435e-9dd2-769dc279c6c5"). InnerVolumeSpecName "kube-api-access-bbt47". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.175857 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f867029a-5e81-436e-82ed-d8c3cef5b734-operator-scripts\") pod \"f867029a-5e81-436e-82ed-d8c3cef5b734\" (UID: \"f867029a-5e81-436e-82ed-d8c3cef5b734\") " Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.176391 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-operator-scripts\") pod \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\" (UID: \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\") " Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.176445 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-operator-scripts\") pod \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\" (UID: \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\") " Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.176554 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlqws\" (UniqueName: \"kubernetes.io/projected/f867029a-5e81-436e-82ed-d8c3cef5b734-kube-api-access-wlqws\") pod \"f867029a-5e81-436e-82ed-d8c3cef5b734\" (UID: \"f867029a-5e81-436e-82ed-d8c3cef5b734\") " Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.176577 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvwcr\" (UniqueName: \"kubernetes.io/projected/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-kube-api-access-qvwcr\") pod \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\" (UID: \"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd\") " Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.176627 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jd4d\" (UniqueName: \"kubernetes.io/projected/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-kube-api-access-9jd4d\") pod \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\" (UID: \"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c\") " Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.177082 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/326ec868-d611-435e-9dd2-769dc279c6c5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.177107 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbt47\" (UniqueName: \"kubernetes.io/projected/326ec868-d611-435e-9dd2-769dc279c6c5-kube-api-access-bbt47\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.178112 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cf5afc28-5f57-4d6f-97de-0bac57eb7a1c" (UID: "cf5afc28-5f57-4d6f-97de-0bac57eb7a1c"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.178172 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f867029a-5e81-436e-82ed-d8c3cef5b734-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f867029a-5e81-436e-82ed-d8c3cef5b734" (UID: "f867029a-5e81-436e-82ed-d8c3cef5b734"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.178212 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7ad2bcd9-0999-4e06-84d1-aed4c51e4edd" (UID: "7ad2bcd9-0999-4e06-84d1-aed4c51e4edd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.181088 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-kube-api-access-qvwcr" (OuterVolumeSpecName: "kube-api-access-qvwcr") pod "7ad2bcd9-0999-4e06-84d1-aed4c51e4edd" (UID: "7ad2bcd9-0999-4e06-84d1-aed4c51e4edd"). InnerVolumeSpecName "kube-api-access-qvwcr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.181207 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-kube-api-access-9jd4d" (OuterVolumeSpecName: "kube-api-access-9jd4d") pod "cf5afc28-5f57-4d6f-97de-0bac57eb7a1c" (UID: "cf5afc28-5f57-4d6f-97de-0bac57eb7a1c"). InnerVolumeSpecName "kube-api-access-9jd4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.181288 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f867029a-5e81-436e-82ed-d8c3cef5b734-kube-api-access-wlqws" (OuterVolumeSpecName: "kube-api-access-wlqws") pod "f867029a-5e81-436e-82ed-d8c3cef5b734" (UID: "f867029a-5e81-436e-82ed-d8c3cef5b734"). InnerVolumeSpecName "kube-api-access-wlqws". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.278960 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f867029a-5e81-436e-82ed-d8c3cef5b734-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.278994 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.279003 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.279012 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlqws\" (UniqueName: \"kubernetes.io/projected/f867029a-5e81-436e-82ed-d8c3cef5b734-kube-api-access-wlqws\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.279023 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvwcr\" (UniqueName: \"kubernetes.io/projected/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd-kube-api-access-qvwcr\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.279031 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jd4d\" (UniqueName: \"kubernetes.io/projected/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c-kube-api-access-9jd4d\") on node \"crc\" DevicePath \"\"" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.556242 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2ea3-account-create-update-f9hmh" event={"ID":"7ad2bcd9-0999-4e06-84d1-aed4c51e4edd","Type":"ContainerDied","Data":"bf985bc4c29379ee455d64d7c08ef2fe4d7521c9d361067b7fec5c72ed1ea73c"} Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.556289 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf985bc4c29379ee455d64d7c08ef2fe4d7521c9d361067b7fec5c72ed1ea73c" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.556433 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2ea3-account-create-update-f9hmh" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.559797 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2dca-account-create-update-z9bbp" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.559803 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2dca-account-create-update-z9bbp" event={"ID":"cf5afc28-5f57-4d6f-97de-0bac57eb7a1c","Type":"ContainerDied","Data":"36b9f85a03cfd50ee03547bde1fbd44708c8b18807bff159955e75e345fcb347"} Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.559845 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36b9f85a03cfd50ee03547bde1fbd44708c8b18807bff159955e75e345fcb347" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.561800 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7f7c-account-create-update-j9m8h" event={"ID":"326ec868-d611-435e-9dd2-769dc279c6c5","Type":"ContainerDied","Data":"bb9908de18183171b76dcabd5216b202a878f213d0821fdd4c7a08e004d116a9"} Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.561824 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb9908de18183171b76dcabd5216b202a878f213d0821fdd4c7a08e004d116a9" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.561880 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-7f7c-account-create-update-j9m8h" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.584065 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-zzrpz" event={"ID":"f867029a-5e81-436e-82ed-d8c3cef5b734","Type":"ContainerDied","Data":"71e529627ce8ca061c5a64e6b2f3840f80bec4a80b728d4729edc9b7f9c83499"} Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.584111 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71e529627ce8ca061c5a64e6b2f3840f80bec4a80b728d4729edc9b7f9c83499" Nov 26 07:13:52 crc kubenswrapper[4940]: I1126 07:13:52.584166 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-zzrpz" Nov 26 07:13:53 crc kubenswrapper[4940]: I1126 07:13:53.178946 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b859c0c-a380-4412-9f3b-6f7de522ad10" path="/var/lib/kubelet/pods/0b859c0c-a380-4412-9f3b-6f7de522ad10/volumes" Nov 26 07:13:56 crc kubenswrapper[4940]: I1126 07:13:56.617973 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mkt8h" event={"ID":"0d22c5e6-6595-4926-9525-cc5e90134b3c","Type":"ContainerStarted","Data":"db3f2b385b9b5f08606b07783338d477465859be0fc661086c63825760b6c395"} Nov 26 07:13:56 crc kubenswrapper[4940]: I1126 07:13:56.641091 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-mkt8h" podStartSLOduration=3.76576785 podStartE2EDuration="9.641071898s" podCreationTimestamp="2025-11-26 07:13:47 +0000 UTC" firstStartedPulling="2025-11-26 07:13:49.85171266 +0000 UTC m=+1131.371854279" lastFinishedPulling="2025-11-26 07:13:55.727016708 +0000 UTC m=+1137.247158327" observedRunningTime="2025-11-26 07:13:56.635293464 +0000 UTC m=+1138.155435083" watchObservedRunningTime="2025-11-26 07:13:56.641071898 +0000 UTC m=+1138.161213517" Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.362445 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.423842 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9fdb784c-j22q2"] Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.424791 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" podUID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" containerName="dnsmasq-dns" containerID="cri-o://f00555917e21bb3d03c30ccb5f3fd5dcc046bf9e7b4651c9f55fd59fc4e5e5d4" gracePeriod=10 Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.647232 4940 generic.go:334] "Generic (PLEG): container finished" podID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" containerID="f00555917e21bb3d03c30ccb5f3fd5dcc046bf9e7b4651c9f55fd59fc4e5e5d4" exitCode=0 Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.647298 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" event={"ID":"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea","Type":"ContainerDied","Data":"f00555917e21bb3d03c30ccb5f3fd5dcc046bf9e7b4651c9f55fd59fc4e5e5d4"} Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.649739 4940 generic.go:334] "Generic (PLEG): container finished" podID="0d22c5e6-6595-4926-9525-cc5e90134b3c" containerID="db3f2b385b9b5f08606b07783338d477465859be0fc661086c63825760b6c395" exitCode=0 Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.649816 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mkt8h" event={"ID":"0d22c5e6-6595-4926-9525-cc5e90134b3c","Type":"ContainerDied","Data":"db3f2b385b9b5f08606b07783338d477465859be0fc661086c63825760b6c395"} Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.867821 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.997188 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-config\") pod \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.997282 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djmbv\" (UniqueName: \"kubernetes.io/projected/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-kube-api-access-djmbv\") pod \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.997340 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-nb\") pod \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.997410 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-dns-svc\") pod \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " Nov 26 07:13:59 crc kubenswrapper[4940]: I1126 07:13:59.997456 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-sb\") pod \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\" (UID: \"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea\") " Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.002350 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-kube-api-access-djmbv" (OuterVolumeSpecName: "kube-api-access-djmbv") pod "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" (UID: "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea"). InnerVolumeSpecName "kube-api-access-djmbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.039029 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-config" (OuterVolumeSpecName: "config") pod "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" (UID: "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.040494 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" (UID: "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.054347 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" (UID: "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.054530 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" (UID: "878d8430-ecb8-4ed4-b6bd-3cd5681e5cea"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.099351 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.099388 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djmbv\" (UniqueName: \"kubernetes.io/projected/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-kube-api-access-djmbv\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.099400 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.099408 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.099416 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.661357 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.661852 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9fdb784c-j22q2" event={"ID":"878d8430-ecb8-4ed4-b6bd-3cd5681e5cea","Type":"ContainerDied","Data":"8cb76cf9fa411ce2a5b3ca045b62bba519f1af37db464368466d6b965aba096b"} Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.671251 4940 scope.go:117] "RemoveContainer" containerID="f00555917e21bb3d03c30ccb5f3fd5dcc046bf9e7b4651c9f55fd59fc4e5e5d4" Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.709761 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9fdb784c-j22q2"] Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.715401 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9fdb784c-j22q2"] Nov 26 07:14:00 crc kubenswrapper[4940]: I1126 07:14:00.717298 4940 scope.go:117] "RemoveContainer" containerID="205e7fae91b54fb19fd64238f1d11edae2df9cf797148fffd87926e8138dfff6" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.182525 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" path="/var/lib/kubelet/pods/878d8430-ecb8-4ed4-b6bd-3cd5681e5cea/volumes" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.483329 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.629723 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-config-data\") pod \"0d22c5e6-6595-4926-9525-cc5e90134b3c\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.629759 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw8rg\" (UniqueName: \"kubernetes.io/projected/0d22c5e6-6595-4926-9525-cc5e90134b3c-kube-api-access-bw8rg\") pod \"0d22c5e6-6595-4926-9525-cc5e90134b3c\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.629881 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-combined-ca-bundle\") pod \"0d22c5e6-6595-4926-9525-cc5e90134b3c\" (UID: \"0d22c5e6-6595-4926-9525-cc5e90134b3c\") " Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.634081 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d22c5e6-6595-4926-9525-cc5e90134b3c-kube-api-access-bw8rg" (OuterVolumeSpecName: "kube-api-access-bw8rg") pod "0d22c5e6-6595-4926-9525-cc5e90134b3c" (UID: "0d22c5e6-6595-4926-9525-cc5e90134b3c"). InnerVolumeSpecName "kube-api-access-bw8rg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.650393 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d22c5e6-6595-4926-9525-cc5e90134b3c" (UID: "0d22c5e6-6595-4926-9525-cc5e90134b3c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.670926 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-config-data" (OuterVolumeSpecName: "config-data") pod "0d22c5e6-6595-4926-9525-cc5e90134b3c" (UID: "0d22c5e6-6595-4926-9525-cc5e90134b3c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.670954 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-mkt8h" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.670973 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-mkt8h" event={"ID":"0d22c5e6-6595-4926-9525-cc5e90134b3c","Type":"ContainerDied","Data":"f4a85f49f74aa1d18876e560fbfba0e52b65ffa06409638560f5ad900ab5fa73"} Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.671009 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4a85f49f74aa1d18876e560fbfba0e52b65ffa06409638560f5ad900ab5fa73" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.732236 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.732267 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bw8rg\" (UniqueName: \"kubernetes.io/projected/0d22c5e6-6595-4926-9525-cc5e90134b3c-kube-api-access-bw8rg\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.732280 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d22c5e6-6595-4926-9525-cc5e90134b3c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.935695 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7dbf8bff67-lw9xc"] Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936015 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cb1ded9-17a3-40e3-955b-11e63806cd6f" containerName="mariadb-database-create" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936031 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cb1ded9-17a3-40e3-955b-11e63806cd6f" containerName="mariadb-database-create" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936054 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" containerName="init" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936060 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" containerName="init" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936071 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d22c5e6-6595-4926-9525-cc5e90134b3c" containerName="keystone-db-sync" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936079 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d22c5e6-6595-4926-9525-cc5e90134b3c" containerName="keystone-db-sync" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936092 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ad2bcd9-0999-4e06-84d1-aed4c51e4edd" containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936100 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ad2bcd9-0999-4e06-84d1-aed4c51e4edd" containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936114 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="326ec868-d611-435e-9dd2-769dc279c6c5" containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936120 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="326ec868-d611-435e-9dd2-769dc279c6c5" 
containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936131 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b859c0c-a380-4412-9f3b-6f7de522ad10" containerName="init" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936136 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b859c0c-a380-4412-9f3b-6f7de522ad10" containerName="init" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936148 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f867029a-5e81-436e-82ed-d8c3cef5b734" containerName="mariadb-database-create" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936154 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f867029a-5e81-436e-82ed-d8c3cef5b734" containerName="mariadb-database-create" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936160 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b859c0c-a380-4412-9f3b-6f7de522ad10" containerName="dnsmasq-dns" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936166 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b859c0c-a380-4412-9f3b-6f7de522ad10" containerName="dnsmasq-dns" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936178 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" containerName="dnsmasq-dns" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936185 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" containerName="dnsmasq-dns" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936197 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33462c2c-29fb-487a-8f23-db40ba07be25" containerName="mariadb-database-create" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936203 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="33462c2c-29fb-487a-8f23-db40ba07be25" containerName="mariadb-database-create" Nov 26 07:14:01 crc kubenswrapper[4940]: E1126 07:14:01.936214 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf5afc28-5f57-4d6f-97de-0bac57eb7a1c" containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936220 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf5afc28-5f57-4d6f-97de-0bac57eb7a1c" containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936362 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="326ec868-d611-435e-9dd2-769dc279c6c5" containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936373 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cb1ded9-17a3-40e3-955b-11e63806cd6f" containerName="mariadb-database-create" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936383 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf5afc28-5f57-4d6f-97de-0bac57eb7a1c" containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936393 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f867029a-5e81-436e-82ed-d8c3cef5b734" containerName="mariadb-database-create" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936403 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="33462c2c-29fb-487a-8f23-db40ba07be25" containerName="mariadb-database-create" Nov 26 07:14:01 crc 
kubenswrapper[4940]: I1126 07:14:01.936412 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="878d8430-ecb8-4ed4-b6bd-3cd5681e5cea" containerName="dnsmasq-dns" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936421 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ad2bcd9-0999-4e06-84d1-aed4c51e4edd" containerName="mariadb-account-create-update" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936430 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d22c5e6-6595-4926-9525-cc5e90134b3c" containerName="keystone-db-sync" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.936439 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b859c0c-a380-4412-9f3b-6f7de522ad10" containerName="dnsmasq-dns" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.937269 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.943440 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7dbf8bff67-lw9xc"] Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.958368 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-dt7rt"] Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.963190 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.968130 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.968340 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.968444 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.968354 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 07:14:01 crc kubenswrapper[4940]: I1126 07:14:01.981485 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tgffk" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.006117 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dt7rt"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.035819 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9d4l9\" (UniqueName: \"kubernetes.io/projected/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-kube-api-access-9d4l9\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.035895 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-nb\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.035926 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-swift-storage-0\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.035946 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-config\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.035977 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-sb\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.036011 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-svc\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140110 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-config-data\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140211 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9d4l9\" (UniqueName: \"kubernetes.io/projected/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-kube-api-access-9d4l9\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140240 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-scripts\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140275 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-fernet-keys\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140317 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-credential-keys\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140350 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-nb\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140377 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mth4g\" (UniqueName: \"kubernetes.io/projected/ca3edba8-af15-43ae-812e-627a837eca5c-kube-api-access-mth4g\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140407 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-swift-storage-0\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140434 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-config\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140482 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-sb\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140516 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-combined-ca-bundle\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.140565 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-svc\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.142363 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-config\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.142894 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-swift-storage-0\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.143473 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-sb\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.145281 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-nb\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.146617 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-svc\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.193354 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9d4l9\" (UniqueName: \"kubernetes.io/projected/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-kube-api-access-9d4l9\") pod \"dnsmasq-dns-7dbf8bff67-lw9xc\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.249349 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-combined-ca-bundle\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.249478 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-config-data\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.249591 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-scripts\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.249652 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-fernet-keys\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.249924 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-credential-keys\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.249981 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mth4g\" (UniqueName: \"kubernetes.io/projected/ca3edba8-af15-43ae-812e-627a837eca5c-kube-api-access-mth4g\") pod \"keystone-bootstrap-dt7rt\" (UID: 
\"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.255752 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.276774 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-credential-keys\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.279417 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-fernet-keys\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.286026 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-scripts\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.288872 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-config-data\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.288960 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.300575 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-combined-ca-bundle\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.302249 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mth4g\" (UniqueName: \"kubernetes.io/projected/ca3edba8-af15-43ae-812e-627a837eca5c-kube-api-access-mth4g\") pod \"keystone-bootstrap-dt7rt\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.322825 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.337607 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.338512 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.338740 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.345949 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-g4vh8"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.347156 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.349450 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.349921 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7nzd7" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.350022 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.386941 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-wkhpm"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.388236 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.390646 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-m5df9" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.394866 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-g4vh8"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.409092 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-wkhpm"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.438633 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7dbf8bff67-lw9xc"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.444322 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.444519 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454186 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-run-httpd\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454535 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-config\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454589 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vdz8\" (UniqueName: \"kubernetes.io/projected/c065108a-6b6e-4257-a44e-15182224f721-kube-api-access-4vdz8\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454628 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49wtm\" (UniqueName: \"kubernetes.io/projected/4950f291-1a37-4725-8321-fa2e0c39155e-kube-api-access-49wtm\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454681 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-combined-ca-bundle\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454712 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-config-data\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454736 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454764 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-scripts\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454822 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.454846 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-log-httpd\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.462250 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76c58b6d97-6b6rc"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.463933 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.479562 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-m6x8r"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.480815 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.495398 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-m6x8r"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.508492 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.508743 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-6gcf6" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.508893 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.511722 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76c58b6d97-6b6rc"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.525143 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-l5smw"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.526561 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.539089 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-l5smw"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.539456 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.539752 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-8l299" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.557932 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-config-data\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.557968 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.557999 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-combined-ca-bundle\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558017 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-scripts\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558151 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-svc\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " 
pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558177 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29f65e2-c5bd-444a-84d1-7532996c10aa-logs\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558198 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-combined-ca-bundle\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558222 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558240 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-log-httpd\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558255 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-scripts\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558276 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-config-data\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558304 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-run-httpd\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558338 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-config\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558360 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s49xb\" (UniqueName: \"kubernetes.io/projected/a29f65e2-c5bd-444a-84d1-7532996c10aa-kube-api-access-s49xb\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558382 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40a16b36-c3e9-4537-bf10-89b685489f39-etc-machine-id\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558410 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-config\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558438 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr6xx\" (UniqueName: \"kubernetes.io/projected/40a16b36-c3e9-4537-bf10-89b685489f39-kube-api-access-pr6xx\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558458 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-nb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558479 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfvxb\" (UniqueName: \"kubernetes.io/projected/0f3bbf96-4fe3-4933-9d70-7328de80dddc-kube-api-access-pfvxb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558503 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-config-data\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558526 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vdz8\" (UniqueName: \"kubernetes.io/projected/c065108a-6b6e-4257-a44e-15182224f721-kube-api-access-4vdz8\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558549 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49wtm\" (UniqueName: \"kubernetes.io/projected/4950f291-1a37-4725-8321-fa2e0c39155e-kube-api-access-49wtm\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558573 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-swift-storage-0\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558600 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-scripts\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558636 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-db-sync-config-data\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558665 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-sb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.558686 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-combined-ca-bundle\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.568706 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-config-data\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.578288 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-log-httpd\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.578980 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-run-httpd\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.584277 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-config\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.584600 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.599850 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.602318 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.602667 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-scripts\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.602920 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-combined-ca-bundle\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.614816 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vdz8\" (UniqueName: \"kubernetes.io/projected/c065108a-6b6e-4257-a44e-15182224f721-kube-api-access-4vdz8\") pod \"ceilometer-0\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.616672 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49wtm\" (UniqueName: \"kubernetes.io/projected/4950f291-1a37-4725-8321-fa2e0c39155e-kube-api-access-49wtm\") pod \"neutron-db-sync-g4vh8\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.647413 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660362 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-scripts\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660419 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-config-data\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660462 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-config\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660493 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s49xb\" (UniqueName: \"kubernetes.io/projected/a29f65e2-c5bd-444a-84d1-7532996c10aa-kube-api-access-s49xb\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660517 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40a16b36-c3e9-4537-bf10-89b685489f39-etc-machine-id\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660552 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr6xx\" (UniqueName: \"kubernetes.io/projected/40a16b36-c3e9-4537-bf10-89b685489f39-kube-api-access-pr6xx\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660576 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-nb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660599 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfvxb\" (UniqueName: \"kubernetes.io/projected/0f3bbf96-4fe3-4933-9d70-7328de80dddc-kube-api-access-pfvxb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660627 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-config-data\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660654 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-swift-storage-0\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660678 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-scripts\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660713 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-db-sync-config-data\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660740 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-sb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660767 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-db-sync-config-data\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660794 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-combined-ca-bundle\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660836 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-combined-ca-bundle\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660865 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcw95\" (UniqueName: \"kubernetes.io/projected/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-kube-api-access-wcw95\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660883 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-svc\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660908 4940 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29f65e2-c5bd-444a-84d1-7532996c10aa-logs\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.660928 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-combined-ca-bundle\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.664598 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40a16b36-c3e9-4537-bf10-89b685489f39-etc-machine-id\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.667383 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-sb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.667816 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-swift-storage-0\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.667967 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-config\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.669119 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-svc\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.669377 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-nb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.669389 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29f65e2-c5bd-444a-84d1-7532996c10aa-logs\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.669666 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.670919 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-config-data\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.673653 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-scripts\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.673915 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-scripts\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.675550 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-combined-ca-bundle\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.677888 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-config-data\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.680614 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-combined-ca-bundle\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.686301 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-db-sync-config-data\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.688971 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfvxb\" (UniqueName: \"kubernetes.io/projected/0f3bbf96-4fe3-4933-9d70-7328de80dddc-kube-api-access-pfvxb\") pod \"dnsmasq-dns-76c58b6d97-6b6rc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.693652 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s49xb\" (UniqueName: \"kubernetes.io/projected/a29f65e2-c5bd-444a-84d1-7532996c10aa-kube-api-access-s49xb\") pod \"placement-db-sync-m6x8r\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.704099 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr6xx\" 
(UniqueName: \"kubernetes.io/projected/40a16b36-c3e9-4537-bf10-89b685489f39-kube-api-access-pr6xx\") pod \"cinder-db-sync-wkhpm\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.706593 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.755387 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7dbf8bff67-lw9xc"] Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.762661 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-db-sync-config-data\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.762715 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-combined-ca-bundle\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.762767 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcw95\" (UniqueName: \"kubernetes.io/projected/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-kube-api-access-wcw95\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.766627 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-combined-ca-bundle\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.769886 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-db-sync-config-data\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.777389 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcw95\" (UniqueName: \"kubernetes.io/projected/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-kube-api-access-wcw95\") pod \"barbican-db-sync-l5smw\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.864576 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.866484 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:02 crc kubenswrapper[4940]: I1126 07:14:02.904715 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.079876 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.083872 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.088073 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.088846 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.089149 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-q9x2b" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.089387 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.096090 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.170910 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.170959 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.170997 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.171182 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-logs\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.171281 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.171319 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.171344 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9dgh\" (UniqueName: \"kubernetes.io/projected/675727e2-b1e4-49c0-a716-4041804dc79b-kube-api-access-r9dgh\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.171371 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.182982 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dt7rt"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.209970 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.212103 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.224056 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.224375 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.228066 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.293284 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.293376 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.293522 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.294187 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-scripts\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " 
pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: W1126 07:14:03.294907 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc065108a_6b6e_4257_a44e_15182224f721.slice/crio-4c49fa62184046c774b2fdef3338ace3669890863959bfb3d7acecfa30e34811 WatchSource:0}: Error finding container 4c49fa62184046c774b2fdef3338ace3669890863959bfb3d7acecfa30e34811: Status 404 returned error can't find the container with id 4c49fa62184046c774b2fdef3338ace3669890863959bfb3d7acecfa30e34811 Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.295103 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.296629 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-logs\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.298417 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-logs\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.298547 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlcj2\" (UniqueName: \"kubernetes.io/projected/4e414e04-7428-4559-b1de-b0396416ff9e-kube-api-access-jlcj2\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.298686 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.298773 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.298850 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-config-data\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.299158 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-logs\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.299209 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.299260 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.299299 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9dgh\" (UniqueName: \"kubernetes.io/projected/675727e2-b1e4-49c0-a716-4041804dc79b-kube-api-access-r9dgh\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.299356 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.299413 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.299569 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.301546 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.301841 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.304399 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.306319 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.307911 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.331611 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-g4vh8"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.334651 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9dgh\" (UniqueName: \"kubernetes.io/projected/675727e2-b1e4-49c0-a716-4041804dc79b-kube-api-access-r9dgh\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.340621 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.344508 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: W1126 07:14:03.351915 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40a16b36_c3e9_4537_bf10_89b685489f39.slice/crio-0a6440478e44b1206a6a11d239969e3aeb83818072d60c501d7043fabc7e792a WatchSource:0}: Error finding container 0a6440478e44b1206a6a11d239969e3aeb83818072d60c501d7043fabc7e792a: Status 404 returned error can't find the container with id 0a6440478e44b1206a6a11d239969e3aeb83818072d60c501d7043fabc7e792a Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.360942 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-wkhpm"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.400964 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlcj2\" (UniqueName: \"kubernetes.io/projected/4e414e04-7428-4559-b1de-b0396416ff9e-kube-api-access-jlcj2\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.401282 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.401320 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-config-data\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.401334 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-logs\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.401360 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.401426 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.401444 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.401462 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-scripts\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.401909 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-logs\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.403088 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.403834 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.406485 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.408920 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-config-data\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.409866 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.410664 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.429673 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.435940 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlcj2\" (UniqueName: \"kubernetes.io/projected/4e414e04-7428-4559-b1de-b0396416ff9e-kube-api-access-jlcj2\") pod \"glance-default-external-api-0\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.478580 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.550227 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-l5smw"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.555193 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.560410 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-m6x8r"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.591754 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76c58b6d97-6b6rc"] Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.726846 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" event={"ID":"0f3bbf96-4fe3-4933-9d70-7328de80dddc","Type":"ContainerStarted","Data":"fa7a3a369b37264955dad15502946b76694aa613ddd55a1d66b3add04be6121a"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.729408 4940 generic.go:334] "Generic (PLEG): container finished" podID="d77d5912-6bd4-4cb1-a411-afff1bb8f7af" containerID="71b1735ade117724037391e4f97c5fcccef1a47e3876d8b4345dd4c35a36c1b3" exitCode=0 Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.729634 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" event={"ID":"d77d5912-6bd4-4cb1-a411-afff1bb8f7af","Type":"ContainerDied","Data":"71b1735ade117724037391e4f97c5fcccef1a47e3876d8b4345dd4c35a36c1b3"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.729701 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" event={"ID":"d77d5912-6bd4-4cb1-a411-afff1bb8f7af","Type":"ContainerStarted","Data":"2f22ebfc53b0405400d06603141c215fb899803ff5429546a03d296743fb29d0"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.747495 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerStarted","Data":"4c49fa62184046c774b2fdef3338ace3669890863959bfb3d7acecfa30e34811"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.752022 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-l5smw" event={"ID":"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd","Type":"ContainerStarted","Data":"aa81cac5861157ec500e02f8e364a1e11ffffe879cba5c91b8c36e583aa55643"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.757312 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-m6x8r" event={"ID":"a29f65e2-c5bd-444a-84d1-7532996c10aa","Type":"ContainerStarted","Data":"53942896d19949d138d9829c94a0337400bb686af2af64efe797fcd4d0cf07f3"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.773906 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dt7rt" event={"ID":"ca3edba8-af15-43ae-812e-627a837eca5c","Type":"ContainerStarted","Data":"0fa107bb0b676759b848d5b7a9c66cd2a36b722337627f11a3312667c0397d04"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.773981 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dt7rt" event={"ID":"ca3edba8-af15-43ae-812e-627a837eca5c","Type":"ContainerStarted","Data":"73b22a8b3d5a5572526d988b794b261a22be4a890ef8729b38b64299d99415d4"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.782356 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-g4vh8" event={"ID":"4950f291-1a37-4725-8321-fa2e0c39155e","Type":"ContainerStarted","Data":"b94c506e6086ba8b91c87d459cec6ced6c7d24b11d96185c34ecdfc3ae960f0b"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.782410 4940 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/neutron-db-sync-g4vh8" event={"ID":"4950f291-1a37-4725-8321-fa2e0c39155e","Type":"ContainerStarted","Data":"2b0948f4c9e3bc7cce924994549fafab220e9cbae61c961bc7eed126d9ce8354"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.801331 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-wkhpm" event={"ID":"40a16b36-c3e9-4537-bf10-89b685489f39","Type":"ContainerStarted","Data":"0a6440478e44b1206a6a11d239969e3aeb83818072d60c501d7043fabc7e792a"} Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.805146 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-dt7rt" podStartSLOduration=2.805119209 podStartE2EDuration="2.805119209s" podCreationTimestamp="2025-11-26 07:14:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:03.793644335 +0000 UTC m=+1145.313785954" watchObservedRunningTime="2025-11-26 07:14:03.805119209 +0000 UTC m=+1145.325260838" Nov 26 07:14:03 crc kubenswrapper[4940]: I1126 07:14:03.829294 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-g4vh8" podStartSLOduration=1.829276375 podStartE2EDuration="1.829276375s" podCreationTimestamp="2025-11-26 07:14:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:03.819293658 +0000 UTC m=+1145.339435277" watchObservedRunningTime="2025-11-26 07:14:03.829276375 +0000 UTC m=+1145.349417994" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.079677 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:04 crc kubenswrapper[4940]: W1126 07:14:04.098986 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod675727e2_b1e4_49c0_a716_4041804dc79b.slice/crio-a18898628bb0515470537221c9054fac54cbb6d23c21fe3ccfa2b87692f5e4c3 WatchSource:0}: Error finding container a18898628bb0515470537221c9054fac54cbb6d23c21fe3ccfa2b87692f5e4c3: Status 404 returned error can't find the container with id a18898628bb0515470537221c9054fac54cbb6d23c21fe3ccfa2b87692f5e4c3 Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.277178 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:04 crc kubenswrapper[4940]: W1126 07:14:04.302262 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4e414e04_7428_4559_b1de_b0396416ff9e.slice/crio-1ac55c1423eb13062b9199f797acc94e59a68d25b3137fb618e96f2391a22872 WatchSource:0}: Error finding container 1ac55c1423eb13062b9199f797acc94e59a68d25b3137fb618e96f2391a22872: Status 404 returned error can't find the container with id 1ac55c1423eb13062b9199f797acc94e59a68d25b3137fb618e96f2391a22872 Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.322431 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.428348 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-sb\") pod \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.428393 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-nb\") pod \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.428568 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-svc\") pod \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.428618 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-swift-storage-0\") pod \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.428641 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9d4l9\" (UniqueName: \"kubernetes.io/projected/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-kube-api-access-9d4l9\") pod \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.428690 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-config\") pod \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\" (UID: \"d77d5912-6bd4-4cb1-a411-afff1bb8f7af\") " Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.440250 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-kube-api-access-9d4l9" (OuterVolumeSpecName: "kube-api-access-9d4l9") pod "d77d5912-6bd4-4cb1-a411-afff1bb8f7af" (UID: "d77d5912-6bd4-4cb1-a411-afff1bb8f7af"). InnerVolumeSpecName "kube-api-access-9d4l9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.454072 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d77d5912-6bd4-4cb1-a411-afff1bb8f7af" (UID: "d77d5912-6bd4-4cb1-a411-afff1bb8f7af"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.457377 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d77d5912-6bd4-4cb1-a411-afff1bb8f7af" (UID: "d77d5912-6bd4-4cb1-a411-afff1bb8f7af"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.460016 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d77d5912-6bd4-4cb1-a411-afff1bb8f7af" (UID: "d77d5912-6bd4-4cb1-a411-afff1bb8f7af"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.461714 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-config" (OuterVolumeSpecName: "config") pod "d77d5912-6bd4-4cb1-a411-afff1bb8f7af" (UID: "d77d5912-6bd4-4cb1-a411-afff1bb8f7af"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.472225 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d77d5912-6bd4-4cb1-a411-afff1bb8f7af" (UID: "d77d5912-6bd4-4cb1-a411-afff1bb8f7af"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.532193 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.532257 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.532271 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9d4l9\" (UniqueName: \"kubernetes.io/projected/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-kube-api-access-9d4l9\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.532305 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.532321 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.532331 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d77d5912-6bd4-4cb1-a411-afff1bb8f7af-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.677177 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.763191 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.838693 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.844372 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"675727e2-b1e4-49c0-a716-4041804dc79b","Type":"ContainerStarted","Data":"a18898628bb0515470537221c9054fac54cbb6d23c21fe3ccfa2b87692f5e4c3"} Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.846115 4940 generic.go:334] "Generic (PLEG): container finished" podID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" containerID="3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b" exitCode=0 Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.846164 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" event={"ID":"0f3bbf96-4fe3-4933-9d70-7328de80dddc","Type":"ContainerDied","Data":"3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b"} Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.874959 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" event={"ID":"d77d5912-6bd4-4cb1-a411-afff1bb8f7af","Type":"ContainerDied","Data":"2f22ebfc53b0405400d06603141c215fb899803ff5429546a03d296743fb29d0"} Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.875051 4940 scope.go:117] "RemoveContainer" containerID="71b1735ade117724037391e4f97c5fcccef1a47e3876d8b4345dd4c35a36c1b3" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.875202 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7dbf8bff67-lw9xc" Nov 26 07:14:04 crc kubenswrapper[4940]: I1126 07:14:04.887114 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4e414e04-7428-4559-b1de-b0396416ff9e","Type":"ContainerStarted","Data":"1ac55c1423eb13062b9199f797acc94e59a68d25b3137fb618e96f2391a22872"} Nov 26 07:14:05 crc kubenswrapper[4940]: I1126 07:14:05.073329 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7dbf8bff67-lw9xc"] Nov 26 07:14:05 crc kubenswrapper[4940]: I1126 07:14:05.081456 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7dbf8bff67-lw9xc"] Nov 26 07:14:05 crc kubenswrapper[4940]: I1126 07:14:05.192476 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d77d5912-6bd4-4cb1-a411-afff1bb8f7af" path="/var/lib/kubelet/pods/d77d5912-6bd4-4cb1-a411-afff1bb8f7af/volumes" Nov 26 07:14:05 crc kubenswrapper[4940]: I1126 07:14:05.905571 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"675727e2-b1e4-49c0-a716-4041804dc79b","Type":"ContainerStarted","Data":"923d34e6b2e5bfbbf0b8541753bb0c5b0230df5aaff92e16a831163b6ad07e68"} Nov 26 07:14:05 crc kubenswrapper[4940]: I1126 07:14:05.909383 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" event={"ID":"0f3bbf96-4fe3-4933-9d70-7328de80dddc","Type":"ContainerStarted","Data":"63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc"} Nov 26 07:14:05 crc kubenswrapper[4940]: I1126 07:14:05.911107 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:05 crc kubenswrapper[4940]: I1126 07:14:05.916108 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4e414e04-7428-4559-b1de-b0396416ff9e","Type":"ContainerStarted","Data":"1b0b7a8153fa17b875b912905ca3eeecef250cea4ba30fadfa693ec0401d9e0a"} Nov 26 07:14:05 crc kubenswrapper[4940]: I1126 07:14:05.934468 4940 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" podStartSLOduration=3.934453911 podStartE2EDuration="3.934453911s" podCreationTimestamp="2025-11-26 07:14:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:05.933197392 +0000 UTC m=+1147.453339031" watchObservedRunningTime="2025-11-26 07:14:05.934453911 +0000 UTC m=+1147.454595520" Nov 26 07:14:06 crc kubenswrapper[4940]: I1126 07:14:06.937564 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"675727e2-b1e4-49c0-a716-4041804dc79b","Type":"ContainerStarted","Data":"fff396a7a112d9acf15be68bef6b9cd50dc3a56c8ac0723a9df49efb0aeed101"} Nov 26 07:14:06 crc kubenswrapper[4940]: I1126 07:14:06.937748 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" containerName="glance-httpd" containerID="cri-o://fff396a7a112d9acf15be68bef6b9cd50dc3a56c8ac0723a9df49efb0aeed101" gracePeriod=30 Nov 26 07:14:06 crc kubenswrapper[4940]: I1126 07:14:06.937743 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" containerName="glance-log" containerID="cri-o://923d34e6b2e5bfbbf0b8541753bb0c5b0230df5aaff92e16a831163b6ad07e68" gracePeriod=30 Nov 26 07:14:06 crc kubenswrapper[4940]: I1126 07:14:06.962246 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" containerName="glance-log" containerID="cri-o://1b0b7a8153fa17b875b912905ca3eeecef250cea4ba30fadfa693ec0401d9e0a" gracePeriod=30 Nov 26 07:14:06 crc kubenswrapper[4940]: I1126 07:14:06.962567 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4e414e04-7428-4559-b1de-b0396416ff9e","Type":"ContainerStarted","Data":"b50c6efb974830e763174531b560df630b1d175d6e6f53275c7e3695c999a78a"} Nov 26 07:14:06 crc kubenswrapper[4940]: I1126 07:14:06.962867 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" containerName="glance-httpd" containerID="cri-o://b50c6efb974830e763174531b560df630b1d175d6e6f53275c7e3695c999a78a" gracePeriod=30 Nov 26 07:14:06 crc kubenswrapper[4940]: I1126 07:14:06.965023 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.965003675 podStartE2EDuration="4.965003675s" podCreationTimestamp="2025-11-26 07:14:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:06.959523332 +0000 UTC m=+1148.479664951" watchObservedRunningTime="2025-11-26 07:14:06.965003675 +0000 UTC m=+1148.485145294" Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.972426 4940 generic.go:334] "Generic (PLEG): container finished" podID="ca3edba8-af15-43ae-812e-627a837eca5c" containerID="0fa107bb0b676759b848d5b7a9c66cd2a36b722337627f11a3312667c0397d04" exitCode=0 Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.972526 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-dt7rt" event={"ID":"ca3edba8-af15-43ae-812e-627a837eca5c","Type":"ContainerDied","Data":"0fa107bb0b676759b848d5b7a9c66cd2a36b722337627f11a3312667c0397d04"} Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.975021 4940 generic.go:334] "Generic (PLEG): container finished" podID="675727e2-b1e4-49c0-a716-4041804dc79b" containerID="fff396a7a112d9acf15be68bef6b9cd50dc3a56c8ac0723a9df49efb0aeed101" exitCode=0 Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.975317 4940 generic.go:334] "Generic (PLEG): container finished" podID="675727e2-b1e4-49c0-a716-4041804dc79b" containerID="923d34e6b2e5bfbbf0b8541753bb0c5b0230df5aaff92e16a831163b6ad07e68" exitCode=143 Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.975066 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"675727e2-b1e4-49c0-a716-4041804dc79b","Type":"ContainerDied","Data":"fff396a7a112d9acf15be68bef6b9cd50dc3a56c8ac0723a9df49efb0aeed101"} Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.975379 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"675727e2-b1e4-49c0-a716-4041804dc79b","Type":"ContainerDied","Data":"923d34e6b2e5bfbbf0b8541753bb0c5b0230df5aaff92e16a831163b6ad07e68"} Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.990064 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.990033745 podStartE2EDuration="5.990033745s" podCreationTimestamp="2025-11-26 07:14:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:06.989303266 +0000 UTC m=+1148.509444885" watchObservedRunningTime="2025-11-26 07:14:07.990033745 +0000 UTC m=+1149.510175364" Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.996362 4940 generic.go:334] "Generic (PLEG): container finished" podID="4e414e04-7428-4559-b1de-b0396416ff9e" containerID="b50c6efb974830e763174531b560df630b1d175d6e6f53275c7e3695c999a78a" exitCode=0 Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.996396 4940 generic.go:334] "Generic (PLEG): container finished" podID="4e414e04-7428-4559-b1de-b0396416ff9e" containerID="1b0b7a8153fa17b875b912905ca3eeecef250cea4ba30fadfa693ec0401d9e0a" exitCode=143 Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.996447 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4e414e04-7428-4559-b1de-b0396416ff9e","Type":"ContainerDied","Data":"b50c6efb974830e763174531b560df630b1d175d6e6f53275c7e3695c999a78a"} Nov 26 07:14:07 crc kubenswrapper[4940]: I1126 07:14:07.996506 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4e414e04-7428-4559-b1de-b0396416ff9e","Type":"ContainerDied","Data":"1b0b7a8153fa17b875b912905ca3eeecef250cea4ba30fadfa693ec0401d9e0a"} Nov 26 07:14:12 crc kubenswrapper[4940]: I1126 07:14:12.865874 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:12 crc kubenswrapper[4940]: I1126 07:14:12.940432 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6856c564b9-5c8k6"] Nov 26 07:14:12 crc kubenswrapper[4940]: I1126 07:14:12.940709 4940 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="dnsmasq-dns" containerID="cri-o://0c1c686c9ef7059d8ab567ae5883d0248649e6a929c20f2da67127f20b09b2d0" gracePeriod=10 Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.048137 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4e414e04-7428-4559-b1de-b0396416ff9e","Type":"ContainerDied","Data":"1ac55c1423eb13062b9199f797acc94e59a68d25b3137fb618e96f2391a22872"} Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.048181 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ac55c1423eb13062b9199f797acc94e59a68d25b3137fb618e96f2391a22872" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.049092 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.110493 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-scripts\") pod \"4e414e04-7428-4559-b1de-b0396416ff9e\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.110637 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"4e414e04-7428-4559-b1de-b0396416ff9e\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.110731 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-httpd-run\") pod \"4e414e04-7428-4559-b1de-b0396416ff9e\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.110774 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlcj2\" (UniqueName: \"kubernetes.io/projected/4e414e04-7428-4559-b1de-b0396416ff9e-kube-api-access-jlcj2\") pod \"4e414e04-7428-4559-b1de-b0396416ff9e\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.110802 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-public-tls-certs\") pod \"4e414e04-7428-4559-b1de-b0396416ff9e\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.110893 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-combined-ca-bundle\") pod \"4e414e04-7428-4559-b1de-b0396416ff9e\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.110964 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-logs\") pod \"4e414e04-7428-4559-b1de-b0396416ff9e\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.110990 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-config-data\") pod \"4e414e04-7428-4559-b1de-b0396416ff9e\" (UID: \"4e414e04-7428-4559-b1de-b0396416ff9e\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.111353 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4e414e04-7428-4559-b1de-b0396416ff9e" (UID: "4e414e04-7428-4559-b1de-b0396416ff9e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.111642 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.111963 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-logs" (OuterVolumeSpecName: "logs") pod "4e414e04-7428-4559-b1de-b0396416ff9e" (UID: "4e414e04-7428-4559-b1de-b0396416ff9e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.120213 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "4e414e04-7428-4559-b1de-b0396416ff9e" (UID: "4e414e04-7428-4559-b1de-b0396416ff9e"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.120297 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-scripts" (OuterVolumeSpecName: "scripts") pod "4e414e04-7428-4559-b1de-b0396416ff9e" (UID: "4e414e04-7428-4559-b1de-b0396416ff9e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.127359 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e414e04-7428-4559-b1de-b0396416ff9e-kube-api-access-jlcj2" (OuterVolumeSpecName: "kube-api-access-jlcj2") pod "4e414e04-7428-4559-b1de-b0396416ff9e" (UID: "4e414e04-7428-4559-b1de-b0396416ff9e"). InnerVolumeSpecName "kube-api-access-jlcj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.149398 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4e414e04-7428-4559-b1de-b0396416ff9e" (UID: "4e414e04-7428-4559-b1de-b0396416ff9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.170772 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4e414e04-7428-4559-b1de-b0396416ff9e" (UID: "4e414e04-7428-4559-b1de-b0396416ff9e"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.181555 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-config-data" (OuterVolumeSpecName: "config-data") pod "4e414e04-7428-4559-b1de-b0396416ff9e" (UID: "4e414e04-7428-4559-b1de-b0396416ff9e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.213209 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.213243 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlcj2\" (UniqueName: \"kubernetes.io/projected/4e414e04-7428-4559-b1de-b0396416ff9e-kube-api-access-jlcj2\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.213259 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.213268 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.213278 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4e414e04-7428-4559-b1de-b0396416ff9e-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.213287 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.213297 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e414e04-7428-4559-b1de-b0396416ff9e-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.241729 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.314978 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.765269 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.823101 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-fernet-keys\") pod \"ca3edba8-af15-43ae-812e-627a837eca5c\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.823168 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-scripts\") pod \"ca3edba8-af15-43ae-812e-627a837eca5c\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.823190 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-combined-ca-bundle\") pod \"ca3edba8-af15-43ae-812e-627a837eca5c\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.823322 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-config-data\") pod \"ca3edba8-af15-43ae-812e-627a837eca5c\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.823403 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-credential-keys\") pod \"ca3edba8-af15-43ae-812e-627a837eca5c\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.823498 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mth4g\" (UniqueName: \"kubernetes.io/projected/ca3edba8-af15-43ae-812e-627a837eca5c-kube-api-access-mth4g\") pod \"ca3edba8-af15-43ae-812e-627a837eca5c\" (UID: \"ca3edba8-af15-43ae-812e-627a837eca5c\") " Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.828005 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-scripts" (OuterVolumeSpecName: "scripts") pod "ca3edba8-af15-43ae-812e-627a837eca5c" (UID: "ca3edba8-af15-43ae-812e-627a837eca5c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.828245 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ca3edba8-af15-43ae-812e-627a837eca5c" (UID: "ca3edba8-af15-43ae-812e-627a837eca5c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.828427 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ca3edba8-af15-43ae-812e-627a837eca5c" (UID: "ca3edba8-af15-43ae-812e-627a837eca5c"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.830058 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca3edba8-af15-43ae-812e-627a837eca5c-kube-api-access-mth4g" (OuterVolumeSpecName: "kube-api-access-mth4g") pod "ca3edba8-af15-43ae-812e-627a837eca5c" (UID: "ca3edba8-af15-43ae-812e-627a837eca5c"). InnerVolumeSpecName "kube-api-access-mth4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.851878 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ca3edba8-af15-43ae-812e-627a837eca5c" (UID: "ca3edba8-af15-43ae-812e-627a837eca5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.864918 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-config-data" (OuterVolumeSpecName: "config-data") pod "ca3edba8-af15-43ae-812e-627a837eca5c" (UID: "ca3edba8-af15-43ae-812e-627a837eca5c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.926230 4940 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.926263 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.926277 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.926290 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.926302 4940 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca3edba8-af15-43ae-812e-627a837eca5c-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:13 crc kubenswrapper[4940]: I1126 07:14:13.926313 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mth4g\" (UniqueName: \"kubernetes.io/projected/ca3edba8-af15-43ae-812e-627a837eca5c-kube-api-access-mth4g\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.059537 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dt7rt" event={"ID":"ca3edba8-af15-43ae-812e-627a837eca5c","Type":"ContainerDied","Data":"73b22a8b3d5a5572526d988b794b261a22be4a890ef8729b38b64299d99415d4"} Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.059588 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73b22a8b3d5a5572526d988b794b261a22be4a890ef8729b38b64299d99415d4" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 
07:14:14.059732 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dt7rt" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.069786 4940 generic.go:334] "Generic (PLEG): container finished" podID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerID="0c1c686c9ef7059d8ab567ae5883d0248649e6a929c20f2da67127f20b09b2d0" exitCode=0 Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.069868 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" event={"ID":"1c6385f9-fbe8-4b99-be0a-ed858327c085","Type":"ContainerDied","Data":"0c1c686c9ef7059d8ab567ae5883d0248649e6a929c20f2da67127f20b09b2d0"} Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.069886 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.109544 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.116502 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.134935 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:14 crc kubenswrapper[4940]: E1126 07:14:14.135285 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77d5912-6bd4-4cb1-a411-afff1bb8f7af" containerName="init" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.135301 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77d5912-6bd4-4cb1-a411-afff1bb8f7af" containerName="init" Nov 26 07:14:14 crc kubenswrapper[4940]: E1126 07:14:14.135315 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" containerName="glance-httpd" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.135322 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" containerName="glance-httpd" Nov 26 07:14:14 crc kubenswrapper[4940]: E1126 07:14:14.135340 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca3edba8-af15-43ae-812e-627a837eca5c" containerName="keystone-bootstrap" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.135345 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca3edba8-af15-43ae-812e-627a837eca5c" containerName="keystone-bootstrap" Nov 26 07:14:14 crc kubenswrapper[4940]: E1126 07:14:14.135359 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" containerName="glance-log" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.135366 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" containerName="glance-log" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.135670 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" containerName="glance-httpd" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.135684 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d77d5912-6bd4-4cb1-a411-afff1bb8f7af" containerName="init" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.135694 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" containerName="glance-log" Nov 26 07:14:14 
crc kubenswrapper[4940]: I1126 07:14:14.135706 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca3edba8-af15-43ae-812e-627a837eca5c" containerName="keystone-bootstrap" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.136861 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.142874 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.143089 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.162475 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.232495 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-config-data\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.232594 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnczv\" (UniqueName: \"kubernetes.io/projected/8682f23a-0622-456d-b9ca-59ffb9fdad24-kube-api-access-vnczv\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.232634 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-logs\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.232670 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-scripts\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.232736 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.232780 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.232816 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.232874 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.333591 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.333666 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-config-data\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.333719 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnczv\" (UniqueName: \"kubernetes.io/projected/8682f23a-0622-456d-b9ca-59ffb9fdad24-kube-api-access-vnczv\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.333755 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-logs\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.333778 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-scripts\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.333826 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.333859 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.333893 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-httpd-run\") 
pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.334349 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-logs\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.334381 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.334389 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.346941 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-config-data\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.347940 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.348140 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnczv\" (UniqueName: \"kubernetes.io/projected/8682f23a-0622-456d-b9ca-59ffb9fdad24-kube-api-access-vnczv\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.354487 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-scripts\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.355078 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.361970 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: connect: connection refused" Nov 26 07:14:14 crc kubenswrapper[4940]: 
I1126 07:14:14.368005 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") " pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.462481 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.915031 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-dt7rt"] Nov 26 07:14:14 crc kubenswrapper[4940]: I1126 07:14:14.923137 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-dt7rt"] Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.029441 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-xpsrr"] Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.030750 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.033487 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.033897 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.034149 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tgffk" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.034665 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.035018 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.036403 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xpsrr"] Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.045322 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-credential-keys\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.045576 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfzsg\" (UniqueName: \"kubernetes.io/projected/1ffadde9-94cd-4f89-b270-e4a533f5399c-kube-api-access-rfzsg\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.045700 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-config-data\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.045818 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-combined-ca-bundle\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.049237 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-fernet-keys\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.049480 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-scripts\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.150376 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-credential-keys\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.150609 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfzsg\" (UniqueName: \"kubernetes.io/projected/1ffadde9-94cd-4f89-b270-e4a533f5399c-kube-api-access-rfzsg\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.150728 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-config-data\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.151208 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-combined-ca-bundle\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.151393 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-fernet-keys\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.151533 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-scripts\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.156498 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-credential-keys\") pod \"keystone-bootstrap-xpsrr\" (UID: 
\"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.156952 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-config-data\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.157113 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-fernet-keys\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.157970 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-combined-ca-bundle\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.160336 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-scripts\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.172200 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfzsg\" (UniqueName: \"kubernetes.io/projected/1ffadde9-94cd-4f89-b270-e4a533f5399c-kube-api-access-rfzsg\") pod \"keystone-bootstrap-xpsrr\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.179028 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e414e04-7428-4559-b1de-b0396416ff9e" path="/var/lib/kubelet/pods/4e414e04-7428-4559-b1de-b0396416ff9e/volumes" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.179624 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca3edba8-af15-43ae-812e-627a837eca5c" path="/var/lib/kubelet/pods/ca3edba8-af15-43ae-812e-627a837eca5c/volumes" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.357764 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.944383 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.963403 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-logs\") pod \"675727e2-b1e4-49c0-a716-4041804dc79b\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.963447 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9dgh\" (UniqueName: \"kubernetes.io/projected/675727e2-b1e4-49c0-a716-4041804dc79b-kube-api-access-r9dgh\") pod \"675727e2-b1e4-49c0-a716-4041804dc79b\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.963467 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-scripts\") pod \"675727e2-b1e4-49c0-a716-4041804dc79b\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.963499 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-combined-ca-bundle\") pod \"675727e2-b1e4-49c0-a716-4041804dc79b\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.963537 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"675727e2-b1e4-49c0-a716-4041804dc79b\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.963555 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-config-data\") pod \"675727e2-b1e4-49c0-a716-4041804dc79b\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.964908 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-logs" (OuterVolumeSpecName: "logs") pod "675727e2-b1e4-49c0-a716-4041804dc79b" (UID: "675727e2-b1e4-49c0-a716-4041804dc79b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.969616 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-scripts" (OuterVolumeSpecName: "scripts") pod "675727e2-b1e4-49c0-a716-4041804dc79b" (UID: "675727e2-b1e4-49c0-a716-4041804dc79b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.971361 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-internal-tls-certs\") pod \"675727e2-b1e4-49c0-a716-4041804dc79b\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.971668 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-httpd-run\") pod \"675727e2-b1e4-49c0-a716-4041804dc79b\" (UID: \"675727e2-b1e4-49c0-a716-4041804dc79b\") " Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.971967 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "675727e2-b1e4-49c0-a716-4041804dc79b" (UID: "675727e2-b1e4-49c0-a716-4041804dc79b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.972675 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.972775 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.972901 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675727e2-b1e4-49c0-a716-4041804dc79b-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.975335 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "675727e2-b1e4-49c0-a716-4041804dc79b" (UID: "675727e2-b1e4-49c0-a716-4041804dc79b"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:14:15 crc kubenswrapper[4940]: I1126 07:14:15.977677 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/675727e2-b1e4-49c0-a716-4041804dc79b-kube-api-access-r9dgh" (OuterVolumeSpecName: "kube-api-access-r9dgh") pod "675727e2-b1e4-49c0-a716-4041804dc79b" (UID: "675727e2-b1e4-49c0-a716-4041804dc79b"). InnerVolumeSpecName "kube-api-access-r9dgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.002775 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "675727e2-b1e4-49c0-a716-4041804dc79b" (UID: "675727e2-b1e4-49c0-a716-4041804dc79b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.029674 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "675727e2-b1e4-49c0-a716-4041804dc79b" (UID: "675727e2-b1e4-49c0-a716-4041804dc79b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.031720 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-config-data" (OuterVolumeSpecName: "config-data") pod "675727e2-b1e4-49c0-a716-4041804dc79b" (UID: "675727e2-b1e4-49c0-a716-4041804dc79b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.074747 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9dgh\" (UniqueName: \"kubernetes.io/projected/675727e2-b1e4-49c0-a716-4041804dc79b-kube-api-access-r9dgh\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.074848 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.074888 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.074943 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.074957 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/675727e2-b1e4-49c0-a716-4041804dc79b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.090117 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"675727e2-b1e4-49c0-a716-4041804dc79b","Type":"ContainerDied","Data":"a18898628bb0515470537221c9054fac54cbb6d23c21fe3ccfa2b87692f5e4c3"} Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.090168 4940 scope.go:117] "RemoveContainer" containerID="fff396a7a112d9acf15be68bef6b9cd50dc3a56c8ac0723a9df49efb0aeed101" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.090202 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.093842 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.148583 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.158686 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.169546 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:16 crc kubenswrapper[4940]: E1126 07:14:16.169874 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" containerName="glance-log" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.169891 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" containerName="glance-log" Nov 26 07:14:16 crc kubenswrapper[4940]: E1126 07:14:16.169908 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" containerName="glance-httpd" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.169915 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" containerName="glance-httpd" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.170169 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" containerName="glance-log" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.170190 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" containerName="glance-httpd" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.171026 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.173086 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.173542 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.175973 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.176510 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.277653 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.277937 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-scripts\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.278027 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-logs\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.278171 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-config-data\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.278311 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.278379 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz8sk\" (UniqueName: \"kubernetes.io/projected/058d948e-a28a-43f1-91f6-d310742359ef-kube-api-access-zz8sk\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.278430 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-httpd-run\") pod \"glance-default-internal-api-0\" 
(UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.278546 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.380644 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.380880 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.380978 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-scripts\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.381105 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-logs\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.381157 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-config-data\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.381172 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.381228 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.381296 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz8sk\" (UniqueName: \"kubernetes.io/projected/058d948e-a28a-43f1-91f6-d310742359ef-kube-api-access-zz8sk\") pod \"glance-default-internal-api-0\" (UID: 
\"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.381360 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.381917 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-logs\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.383508 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.387089 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-config-data\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.393072 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.393167 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.396378 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-scripts\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.401723 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz8sk\" (UniqueName: \"kubernetes.io/projected/058d948e-a28a-43f1-91f6-d310742359ef-kube-api-access-zz8sk\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.419383 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:14:16 crc kubenswrapper[4940]: I1126 07:14:16.519692 4940 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:17 crc kubenswrapper[4940]: I1126 07:14:17.176387 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="675727e2-b1e4-49c0-a716-4041804dc79b" path="/var/lib/kubelet/pods/675727e2-b1e4-49c0-a716-4041804dc79b/volumes" Nov 26 07:14:19 crc kubenswrapper[4940]: I1126 07:14:19.361994 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: connect: connection refused" Nov 26 07:14:21 crc kubenswrapper[4940]: I1126 07:14:21.728737 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:14:21 crc kubenswrapper[4940]: I1126 07:14:21.729142 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:14:24 crc kubenswrapper[4940]: I1126 07:14:24.841511 4940 scope.go:117] "RemoveContainer" containerID="923d34e6b2e5bfbbf0b8541753bb0c5b0230df5aaff92e16a831163b6ad07e68" Nov 26 07:14:24 crc kubenswrapper[4940]: E1126 07:14:24.855458 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879" Nov 26 07:14:24 crc kubenswrapper[4940]: E1126 07:14:24.855617 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pr6xx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-wkhpm_openstack(40a16b36-c3e9-4537-bf10-89b685489f39): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:14:24 crc kubenswrapper[4940]: E1126 07:14:24.856834 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-wkhpm" podUID="40a16b36-c3e9-4537-bf10-89b685489f39" Nov 26 07:14:24 crc kubenswrapper[4940]: I1126 07:14:24.994049 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.074882 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-config\") pod \"1c6385f9-fbe8-4b99-be0a-ed858327c085\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.074973 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-svc\") pod \"1c6385f9-fbe8-4b99-be0a-ed858327c085\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.075053 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-nb\") pod \"1c6385f9-fbe8-4b99-be0a-ed858327c085\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.075133 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-swift-storage-0\") pod \"1c6385f9-fbe8-4b99-be0a-ed858327c085\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.075177 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-sb\") pod \"1c6385f9-fbe8-4b99-be0a-ed858327c085\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.075253 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bkxl\" (UniqueName: \"kubernetes.io/projected/1c6385f9-fbe8-4b99-be0a-ed858327c085-kube-api-access-7bkxl\") pod \"1c6385f9-fbe8-4b99-be0a-ed858327c085\" (UID: \"1c6385f9-fbe8-4b99-be0a-ed858327c085\") " Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.083353 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c6385f9-fbe8-4b99-be0a-ed858327c085-kube-api-access-7bkxl" (OuterVolumeSpecName: "kube-api-access-7bkxl") pod "1c6385f9-fbe8-4b99-be0a-ed858327c085" (UID: "1c6385f9-fbe8-4b99-be0a-ed858327c085"). InnerVolumeSpecName "kube-api-access-7bkxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.116987 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1c6385f9-fbe8-4b99-be0a-ed858327c085" (UID: "1c6385f9-fbe8-4b99-be0a-ed858327c085"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.118636 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1c6385f9-fbe8-4b99-be0a-ed858327c085" (UID: "1c6385f9-fbe8-4b99-be0a-ed858327c085"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.131637 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1c6385f9-fbe8-4b99-be0a-ed858327c085" (UID: "1c6385f9-fbe8-4b99-be0a-ed858327c085"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.134338 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1c6385f9-fbe8-4b99-be0a-ed858327c085" (UID: "1c6385f9-fbe8-4b99-be0a-ed858327c085"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.171098 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-config" (OuterVolumeSpecName: "config") pod "1c6385f9-fbe8-4b99-be0a-ed858327c085" (UID: "1c6385f9-fbe8-4b99-be0a-ed858327c085"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.183203 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.183238 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.183260 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.183271 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bkxl\" (UniqueName: \"kubernetes.io/projected/1c6385f9-fbe8-4b99-be0a-ed858327c085-kube-api-access-7bkxl\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.183287 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.183299 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c6385f9-fbe8-4b99-be0a-ed858327c085-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.195116 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" Nov 26 07:14:25 crc kubenswrapper[4940]: E1126 07:14:25.203137 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879\\\"\"" pod="openstack/cinder-db-sync-wkhpm" podUID="40a16b36-c3e9-4537-bf10-89b685489f39" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.241977 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" event={"ID":"1c6385f9-fbe8-4b99-be0a-ed858327c085","Type":"ContainerDied","Data":"f78215ca99f0332c05da3e7012d2e2d387568a91d53f393ce4f6d88c7b721949"} Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.242034 4940 scope.go:117] "RemoveContainer" containerID="0c1c686c9ef7059d8ab567ae5883d0248649e6a929c20f2da67127f20b09b2d0" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.281667 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6856c564b9-5c8k6"] Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.288203 4940 scope.go:117] "RemoveContainer" containerID="8fd2f9b1a0537af60c43b1fc9f7d253646e9f4a6e0f1379d2ec9fe17df7506cf" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.295542 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6856c564b9-5c8k6"] Nov 26 07:14:25 crc kubenswrapper[4940]: E1126 07:14:25.413273 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c6385f9_fbe8_4b99_be0a_ed858327c085.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c6385f9_fbe8_4b99_be0a_ed858327c085.slice/crio-f78215ca99f0332c05da3e7012d2e2d387568a91d53f393ce4f6d88c7b721949\": RecentStats: unable to find data in memory cache]" Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.418881 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xpsrr"] Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.437330 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:14:25 crc kubenswrapper[4940]: W1126 07:14:25.440272 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8682f23a_0622_456d_b9ca_59ffb9fdad24.slice/crio-9bcb321aa1700ff6b6d7c386295b1738e703ed0628eaef16b93b6a4945efcba0 WatchSource:0}: Error finding container 9bcb321aa1700ff6b6d7c386295b1738e703ed0628eaef16b93b6a4945efcba0: Status 404 returned error can't find the container with id 9bcb321aa1700ff6b6d7c386295b1738e703ed0628eaef16b93b6a4945efcba0 Nov 26 07:14:25 crc kubenswrapper[4940]: I1126 07:14:25.561372 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.206290 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerStarted","Data":"c371286dd54e7a4ea39d1cf40298d2fd48e36ed978e9d3f1d34183d5e0e6d5d0"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.209186 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-db-sync-l5smw" event={"ID":"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd","Type":"ContainerStarted","Data":"939ccd4afccd08df91420e1a45d12166e869acef7735945e6dcdc351f2148b88"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.212201 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-m6x8r" event={"ID":"a29f65e2-c5bd-444a-84d1-7532996c10aa","Type":"ContainerStarted","Data":"95344aa6ca07c1a8566037a544ee375fe3f57a2f83ff0864fdf7dc405181e926"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.214651 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"058d948e-a28a-43f1-91f6-d310742359ef","Type":"ContainerStarted","Data":"db1432ea91ace69890bd709f4b407b488030b20bee761e31c16f169a68c5c37c"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.218094 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8682f23a-0622-456d-b9ca-59ffb9fdad24","Type":"ContainerStarted","Data":"3ffa118f5f527489dc345765e83342efe059f394a1846bd7fa6ddd85e3436876"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.218189 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8682f23a-0622-456d-b9ca-59ffb9fdad24","Type":"ContainerStarted","Data":"9bcb321aa1700ff6b6d7c386295b1738e703ed0628eaef16b93b6a4945efcba0"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.226856 4940 generic.go:334] "Generic (PLEG): container finished" podID="4950f291-1a37-4725-8321-fa2e0c39155e" containerID="b94c506e6086ba8b91c87d459cec6ced6c7d24b11d96185c34ecdfc3ae960f0b" exitCode=0 Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.226936 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-g4vh8" event={"ID":"4950f291-1a37-4725-8321-fa2e0c39155e","Type":"ContainerDied","Data":"b94c506e6086ba8b91c87d459cec6ced6c7d24b11d96185c34ecdfc3ae960f0b"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.230384 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-l5smw" podStartSLOduration=2.968759843 podStartE2EDuration="24.230344565s" podCreationTimestamp="2025-11-26 07:14:02 +0000 UTC" firstStartedPulling="2025-11-26 07:14:03.602255774 +0000 UTC m=+1145.122397393" lastFinishedPulling="2025-11-26 07:14:24.863840476 +0000 UTC m=+1166.383982115" observedRunningTime="2025-11-26 07:14:26.22578054 +0000 UTC m=+1167.745922169" watchObservedRunningTime="2025-11-26 07:14:26.230344565 +0000 UTC m=+1167.750486184" Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.233341 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xpsrr" event={"ID":"1ffadde9-94cd-4f89-b270-e4a533f5399c","Type":"ContainerStarted","Data":"2227bb45b35e6b1fcd5af6d0afba00e9a99c6d24596b85f6b4a5623ebda271e8"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.233665 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xpsrr" event={"ID":"1ffadde9-94cd-4f89-b270-e4a533f5399c","Type":"ContainerStarted","Data":"913572365ff8da06f2b7b2acc0059dac81ee767e01ce81f24c3f1e7588697c80"} Nov 26 07:14:26 crc kubenswrapper[4940]: I1126 07:14:26.248175 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-m6x8r" podStartSLOduration=2.948230383 podStartE2EDuration="24.2481568s" podCreationTimestamp="2025-11-26 07:14:02 +0000 UTC" 
firstStartedPulling="2025-11-26 07:14:03.563973321 +0000 UTC m=+1145.084114940" lastFinishedPulling="2025-11-26 07:14:24.863899738 +0000 UTC m=+1166.384041357" observedRunningTime="2025-11-26 07:14:26.24152042 +0000 UTC m=+1167.761662039" watchObservedRunningTime="2025-11-26 07:14:26.2481568 +0000 UTC m=+1167.768298419" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.176407 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" path="/var/lib/kubelet/pods/1c6385f9-fbe8-4b99-be0a-ed858327c085/volumes" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.257344 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8682f23a-0622-456d-b9ca-59ffb9fdad24","Type":"ContainerStarted","Data":"c699e8ed09c1b1959948a2a83983b2a73362370ad13942383ea39dc8e7bc31d5"} Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.276701 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerStarted","Data":"3f826e94f4b99a33ae2fbb54cec7a37b25ae6126d27f6602e2d190853c003c6a"} Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.278649 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"058d948e-a28a-43f1-91f6-d310742359ef","Type":"ContainerStarted","Data":"51ad24223d851461f35281c479e625834cc22512617bb363aa06d8deb4fd3260"} Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.278696 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"058d948e-a28a-43f1-91f6-d310742359ef","Type":"ContainerStarted","Data":"8a1fcf1dd9e392bbbd9e6281fa487af8c1811e6f62a0ab8572b68bcaad1323ba"} Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.309983 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-xpsrr" podStartSLOduration=12.309960605 podStartE2EDuration="12.309960605s" podCreationTimestamp="2025-11-26 07:14:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:26.27624431 +0000 UTC m=+1167.796385929" watchObservedRunningTime="2025-11-26 07:14:27.309960605 +0000 UTC m=+1168.830102224" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.315391 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=13.315368717 podStartE2EDuration="13.315368717s" podCreationTimestamp="2025-11-26 07:14:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:27.281013607 +0000 UTC m=+1168.801155236" watchObservedRunningTime="2025-11-26 07:14:27.315368717 +0000 UTC m=+1168.835510336" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.347033 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=11.347010631 podStartE2EDuration="11.347010631s" podCreationTimestamp="2025-11-26 07:14:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:27.32964657 +0000 UTC m=+1168.849788189" watchObservedRunningTime="2025-11-26 07:14:27.347010631 +0000 UTC m=+1168.867152260" Nov 26 07:14:27 crc 
kubenswrapper[4940]: I1126 07:14:27.646657 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.745795 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49wtm\" (UniqueName: \"kubernetes.io/projected/4950f291-1a37-4725-8321-fa2e0c39155e-kube-api-access-49wtm\") pod \"4950f291-1a37-4725-8321-fa2e0c39155e\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.745864 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-config\") pod \"4950f291-1a37-4725-8321-fa2e0c39155e\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.745969 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-combined-ca-bundle\") pod \"4950f291-1a37-4725-8321-fa2e0c39155e\" (UID: \"4950f291-1a37-4725-8321-fa2e0c39155e\") " Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.751482 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4950f291-1a37-4725-8321-fa2e0c39155e-kube-api-access-49wtm" (OuterVolumeSpecName: "kube-api-access-49wtm") pod "4950f291-1a37-4725-8321-fa2e0c39155e" (UID: "4950f291-1a37-4725-8321-fa2e0c39155e"). InnerVolumeSpecName "kube-api-access-49wtm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.776744 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-config" (OuterVolumeSpecName: "config") pod "4950f291-1a37-4725-8321-fa2e0c39155e" (UID: "4950f291-1a37-4725-8321-fa2e0c39155e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.776927 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4950f291-1a37-4725-8321-fa2e0c39155e" (UID: "4950f291-1a37-4725-8321-fa2e0c39155e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.848028 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49wtm\" (UniqueName: \"kubernetes.io/projected/4950f291-1a37-4725-8321-fa2e0c39155e-kube-api-access-49wtm\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.848080 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:27 crc kubenswrapper[4940]: I1126 07:14:27.848102 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4950f291-1a37-4725-8321-fa2e0c39155e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.306407 4940 generic.go:334] "Generic (PLEG): container finished" podID="a29f65e2-c5bd-444a-84d1-7532996c10aa" containerID="95344aa6ca07c1a8566037a544ee375fe3f57a2f83ff0864fdf7dc405181e926" exitCode=0 Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.306481 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-m6x8r" event={"ID":"a29f65e2-c5bd-444a-84d1-7532996c10aa","Type":"ContainerDied","Data":"95344aa6ca07c1a8566037a544ee375fe3f57a2f83ff0864fdf7dc405181e926"} Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.313373 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-g4vh8" event={"ID":"4950f291-1a37-4725-8321-fa2e0c39155e","Type":"ContainerDied","Data":"2b0948f4c9e3bc7cce924994549fafab220e9cbae61c961bc7eed126d9ce8354"} Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.313412 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b0948f4c9e3bc7cce924994549fafab220e9cbae61c961bc7eed126d9ce8354" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.313523 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-g4vh8" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.493853 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c654c9745-wm9vz"] Nov 26 07:14:28 crc kubenswrapper[4940]: E1126 07:14:28.494700 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="init" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.494726 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="init" Nov 26 07:14:28 crc kubenswrapper[4940]: E1126 07:14:28.494760 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="dnsmasq-dns" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.494768 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="dnsmasq-dns" Nov 26 07:14:28 crc kubenswrapper[4940]: E1126 07:14:28.494782 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4950f291-1a37-4725-8321-fa2e0c39155e" containerName="neutron-db-sync" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.494792 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4950f291-1a37-4725-8321-fa2e0c39155e" containerName="neutron-db-sync" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.494992 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="dnsmasq-dns" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.495010 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4950f291-1a37-4725-8321-fa2e0c39155e" containerName="neutron-db-sync" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.496203 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.505528 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c654c9745-wm9vz"] Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.565546 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-svc\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.565615 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-config\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.565723 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-sb\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.565763 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-nb\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.565806 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vktrx\" (UniqueName: \"kubernetes.io/projected/bd800465-05ac-42c7-b1da-fa973f2571d8-kube-api-access-vktrx\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.565827 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-swift-storage-0\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.588203 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-cc77dcc7b-4zbzz"] Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.590163 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.592886 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7nzd7" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.593228 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.593382 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.593598 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.599310 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-cc77dcc7b-4zbzz"] Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667186 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4xj8\" (UniqueName: \"kubernetes.io/projected/4f5c5403-4bbf-4a62-8e53-e0c774252e72-kube-api-access-p4xj8\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667235 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-sb\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667286 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-nb\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667313 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-config\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667341 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vktrx\" (UniqueName: \"kubernetes.io/projected/bd800465-05ac-42c7-b1da-fa973f2571d8-kube-api-access-vktrx\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667365 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-swift-storage-0\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667430 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-svc\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: 
\"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667450 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-httpd-config\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667473 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-config\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667560 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-ovndb-tls-certs\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.667581 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-combined-ca-bundle\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.668663 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-sb\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.668680 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-nb\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.668909 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-swift-storage-0\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.669220 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-svc\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.669399 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-config\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 
crc kubenswrapper[4940]: I1126 07:14:28.692006 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vktrx\" (UniqueName: \"kubernetes.io/projected/bd800465-05ac-42c7-b1da-fa973f2571d8-kube-api-access-vktrx\") pod \"dnsmasq-dns-6c654c9745-wm9vz\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.768741 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-httpd-config\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.768840 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-ovndb-tls-certs\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.768863 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-combined-ca-bundle\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.768891 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4xj8\" (UniqueName: \"kubernetes.io/projected/4f5c5403-4bbf-4a62-8e53-e0c774252e72-kube-api-access-p4xj8\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.768925 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-config\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.773032 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-config\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.779150 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-ovndb-tls-certs\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.779547 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-httpd-config\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.784678 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-combined-ca-bundle\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.800709 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4xj8\" (UniqueName: \"kubernetes.io/projected/4f5c5403-4bbf-4a62-8e53-e0c774252e72-kube-api-access-p4xj8\") pod \"neutron-cc77dcc7b-4zbzz\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.824203 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:28 crc kubenswrapper[4940]: I1126 07:14:28.913450 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:29 crc kubenswrapper[4940]: I1126 07:14:29.283092 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c654c9745-wm9vz"] Nov 26 07:14:29 crc kubenswrapper[4940]: I1126 07:14:29.327695 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ffadde9-94cd-4f89-b270-e4a533f5399c" containerID="2227bb45b35e6b1fcd5af6d0afba00e9a99c6d24596b85f6b4a5623ebda271e8" exitCode=0 Nov 26 07:14:29 crc kubenswrapper[4940]: I1126 07:14:29.327779 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xpsrr" event={"ID":"1ffadde9-94cd-4f89-b270-e4a533f5399c","Type":"ContainerDied","Data":"2227bb45b35e6b1fcd5af6d0afba00e9a99c6d24596b85f6b4a5623ebda271e8"} Nov 26 07:14:29 crc kubenswrapper[4940]: I1126 07:14:29.330521 4940 generic.go:334] "Generic (PLEG): container finished" podID="30b2a8d7-c2a4-462f-a93d-dc4b79f238bd" containerID="939ccd4afccd08df91420e1a45d12166e869acef7735945e6dcdc351f2148b88" exitCode=0 Nov 26 07:14:29 crc kubenswrapper[4940]: I1126 07:14:29.330613 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-l5smw" event={"ID":"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd","Type":"ContainerDied","Data":"939ccd4afccd08df91420e1a45d12166e869acef7735945e6dcdc351f2148b88"} Nov 26 07:14:29 crc kubenswrapper[4940]: I1126 07:14:29.362178 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6856c564b9-5c8k6" podUID="1c6385f9-fbe8-4b99-be0a-ed858327c085" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: i/o timeout" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.771880 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.914470 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-scripts\") pod \"a29f65e2-c5bd-444a-84d1-7532996c10aa\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.914591 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29f65e2-c5bd-444a-84d1-7532996c10aa-logs\") pod \"a29f65e2-c5bd-444a-84d1-7532996c10aa\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.914650 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-config-data\") pod \"a29f65e2-c5bd-444a-84d1-7532996c10aa\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.914671 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s49xb\" (UniqueName: \"kubernetes.io/projected/a29f65e2-c5bd-444a-84d1-7532996c10aa-kube-api-access-s49xb\") pod \"a29f65e2-c5bd-444a-84d1-7532996c10aa\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.914708 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-combined-ca-bundle\") pod \"a29f65e2-c5bd-444a-84d1-7532996c10aa\" (UID: \"a29f65e2-c5bd-444a-84d1-7532996c10aa\") " Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.915170 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a29f65e2-c5bd-444a-84d1-7532996c10aa-logs" (OuterVolumeSpecName: "logs") pod "a29f65e2-c5bd-444a-84d1-7532996c10aa" (UID: "a29f65e2-c5bd-444a-84d1-7532996c10aa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.917240 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a29f65e2-c5bd-444a-84d1-7532996c10aa-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.926299 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a29f65e2-c5bd-444a-84d1-7532996c10aa-kube-api-access-s49xb" (OuterVolumeSpecName: "kube-api-access-s49xb") pod "a29f65e2-c5bd-444a-84d1-7532996c10aa" (UID: "a29f65e2-c5bd-444a-84d1-7532996c10aa"). InnerVolumeSpecName "kube-api-access-s49xb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.926392 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-scripts" (OuterVolumeSpecName: "scripts") pod "a29f65e2-c5bd-444a-84d1-7532996c10aa" (UID: "a29f65e2-c5bd-444a-84d1-7532996c10aa"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.950497 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a29f65e2-c5bd-444a-84d1-7532996c10aa" (UID: "a29f65e2-c5bd-444a-84d1-7532996c10aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.952190 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7648b55b6f-h7txx"] Nov 26 07:14:30 crc kubenswrapper[4940]: E1126 07:14:30.952534 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a29f65e2-c5bd-444a-84d1-7532996c10aa" containerName="placement-db-sync" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.952550 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a29f65e2-c5bd-444a-84d1-7532996c10aa" containerName="placement-db-sync" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.952733 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a29f65e2-c5bd-444a-84d1-7532996c10aa" containerName="placement-db-sync" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.953548 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.957744 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.958148 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.963143 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7648b55b6f-h7txx"] Nov 26 07:14:30 crc kubenswrapper[4940]: I1126 07:14:30.975161 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-config-data" (OuterVolumeSpecName: "config-data") pod "a29f65e2-c5bd-444a-84d1-7532996c10aa" (UID: "a29f65e2-c5bd-444a-84d1-7532996c10aa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.019309 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-public-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.019403 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-config\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.019453 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-combined-ca-bundle\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.019566 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-internal-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.019672 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-httpd-config\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.019761 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-ovndb-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.019975 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb85b\" (UniqueName: \"kubernetes.io/projected/e9325bed-7edc-41a3-a53c-fb5d147532f5-kube-api-access-pb85b\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.020212 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.020244 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.020259 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/a29f65e2-c5bd-444a-84d1-7532996c10aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.020270 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s49xb\" (UniqueName: \"kubernetes.io/projected/a29f65e2-c5bd-444a-84d1-7532996c10aa-kube-api-access-s49xb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.122347 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-public-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.122432 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-config\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.122482 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-combined-ca-bundle\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.122523 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-internal-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.122550 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-httpd-config\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.122593 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-ovndb-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.122654 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb85b\" (UniqueName: \"kubernetes.io/projected/e9325bed-7edc-41a3-a53c-fb5d147532f5-kube-api-access-pb85b\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.131801 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-internal-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.131990 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-httpd-config\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.132011 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-combined-ca-bundle\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.132183 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-config\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.137532 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-ovndb-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.137992 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-public-tls-certs\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.139211 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb85b\" (UniqueName: \"kubernetes.io/projected/e9325bed-7edc-41a3-a53c-fb5d147532f5-kube-api-access-pb85b\") pod \"neutron-7648b55b6f-h7txx\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.350188 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-m6x8r" event={"ID":"a29f65e2-c5bd-444a-84d1-7532996c10aa","Type":"ContainerDied","Data":"53942896d19949d138d9829c94a0337400bb686af2af64efe797fcd4d0cf07f3"} Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.350235 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53942896d19949d138d9829c94a0337400bb686af2af64efe797fcd4d0cf07f3" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.350292 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-m6x8r" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.375236 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.968981 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-65d858fd7b-dbln9"] Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.975813 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.980776 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-65d858fd7b-dbln9"] Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.981116 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.981271 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.981754 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.981780 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 07:14:31 crc kubenswrapper[4940]: I1126 07:14:31.982213 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-6gcf6" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.040386 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be43a059-c201-4bf3-92ac-304d58de4c02-logs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.040487 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-config-data\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.040515 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-internal-tls-certs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.040545 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-combined-ca-bundle\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.040570 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9rql\" (UniqueName: \"kubernetes.io/projected/be43a059-c201-4bf3-92ac-304d58de4c02-kube-api-access-s9rql\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.040595 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-scripts\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.040616 4940 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-public-tls-certs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.142866 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-scripts\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.142915 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-public-tls-certs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.142957 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be43a059-c201-4bf3-92ac-304d58de4c02-logs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.143034 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-config-data\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.143077 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-internal-tls-certs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.143108 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-combined-ca-bundle\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.143133 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9rql\" (UniqueName: \"kubernetes.io/projected/be43a059-c201-4bf3-92ac-304d58de4c02-kube-api-access-s9rql\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.144809 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be43a059-c201-4bf3-92ac-304d58de4c02-logs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.146749 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-public-tls-certs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.146913 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-scripts\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.149952 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-combined-ca-bundle\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.150064 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-internal-tls-certs\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.150481 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-config-data\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.159391 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9rql\" (UniqueName: \"kubernetes.io/projected/be43a059-c201-4bf3-92ac-304d58de4c02-kube-api-access-s9rql\") pod \"placement-65d858fd7b-dbln9\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.292551 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.607385 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xpsrr" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.630612 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-l5smw" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.757808 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-db-sync-config-data\") pod \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.758764 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-credential-keys\") pod \"1ffadde9-94cd-4f89-b270-e4a533f5399c\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.758828 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcw95\" (UniqueName: \"kubernetes.io/projected/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-kube-api-access-wcw95\") pod \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.758887 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-scripts\") pod \"1ffadde9-94cd-4f89-b270-e4a533f5399c\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.759015 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfzsg\" (UniqueName: \"kubernetes.io/projected/1ffadde9-94cd-4f89-b270-e4a533f5399c-kube-api-access-rfzsg\") pod \"1ffadde9-94cd-4f89-b270-e4a533f5399c\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.759180 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-fernet-keys\") pod \"1ffadde9-94cd-4f89-b270-e4a533f5399c\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.759428 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-combined-ca-bundle\") pod \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\" (UID: \"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.759562 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-config-data\") pod \"1ffadde9-94cd-4f89-b270-e4a533f5399c\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.759617 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-combined-ca-bundle\") pod \"1ffadde9-94cd-4f89-b270-e4a533f5399c\" (UID: \"1ffadde9-94cd-4f89-b270-e4a533f5399c\") " Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.763865 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod 
"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd" (UID: "30b2a8d7-c2a4-462f-a93d-dc4b79f238bd"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.769206 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-scripts" (OuterVolumeSpecName: "scripts") pod "1ffadde9-94cd-4f89-b270-e4a533f5399c" (UID: "1ffadde9-94cd-4f89-b270-e4a533f5399c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.778833 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-kube-api-access-wcw95" (OuterVolumeSpecName: "kube-api-access-wcw95") pod "30b2a8d7-c2a4-462f-a93d-dc4b79f238bd" (UID: "30b2a8d7-c2a4-462f-a93d-dc4b79f238bd"). InnerVolumeSpecName "kube-api-access-wcw95". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.779440 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1ffadde9-94cd-4f89-b270-e4a533f5399c" (UID: "1ffadde9-94cd-4f89-b270-e4a533f5399c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.779642 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "1ffadde9-94cd-4f89-b270-e4a533f5399c" (UID: "1ffadde9-94cd-4f89-b270-e4a533f5399c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.780971 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ffadde9-94cd-4f89-b270-e4a533f5399c-kube-api-access-rfzsg" (OuterVolumeSpecName: "kube-api-access-rfzsg") pod "1ffadde9-94cd-4f89-b270-e4a533f5399c" (UID: "1ffadde9-94cd-4f89-b270-e4a533f5399c"). InnerVolumeSpecName "kube-api-access-rfzsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.791152 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30b2a8d7-c2a4-462f-a93d-dc4b79f238bd" (UID: "30b2a8d7-c2a4-462f-a93d-dc4b79f238bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.821911 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ffadde9-94cd-4f89-b270-e4a533f5399c" (UID: "1ffadde9-94cd-4f89-b270-e4a533f5399c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.823671 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-config-data" (OuterVolumeSpecName: "config-data") pod "1ffadde9-94cd-4f89-b270-e4a533f5399c" (UID: "1ffadde9-94cd-4f89-b270-e4a533f5399c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.861873 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.861916 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.861933 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.861948 4940 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.862040 4940 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.862071 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wcw95\" (UniqueName: \"kubernetes.io/projected/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd-kube-api-access-wcw95\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.862083 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.862095 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfzsg\" (UniqueName: \"kubernetes.io/projected/1ffadde9-94cd-4f89-b270-e4a533f5399c-kube-api-access-rfzsg\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:32 crc kubenswrapper[4940]: I1126 07:14:32.862110 4940 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1ffadde9-94cd-4f89-b270-e4a533f5399c-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.096520 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-cc77dcc7b-4zbzz"] Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.237395 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7648b55b6f-h7txx"] Nov 26 07:14:33 crc kubenswrapper[4940]: W1126 07:14:33.238256 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9325bed_7edc_41a3_a53c_fb5d147532f5.slice/crio-b5dc4b531264bd8a86c07f33beee4a7907a2b2779a72899d2212212d3ecadb41 WatchSource:0}: Error 
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.277815 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-65d858fd7b-dbln9"]
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.366609 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7648b55b6f-h7txx" event={"ID":"e9325bed-7edc-41a3-a53c-fb5d147532f5","Type":"ContainerStarted","Data":"b5dc4b531264bd8a86c07f33beee4a7907a2b2779a72899d2212212d3ecadb41"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.368697 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xpsrr" event={"ID":"1ffadde9-94cd-4f89-b270-e4a533f5399c","Type":"ContainerDied","Data":"913572365ff8da06f2b7b2acc0059dac81ee767e01ce81f24c3f1e7588697c80"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.368747 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="913572365ff8da06f2b7b2acc0059dac81ee767e01ce81f24c3f1e7588697c80"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.369168 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xpsrr"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.370702 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerStarted","Data":"23beb4790897ba6bf03c51eee0cac936857323ad9c782af67906d0ee81b24dc0"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.371793 4940 generic.go:334] "Generic (PLEG): container finished" podID="bd800465-05ac-42c7-b1da-fa973f2571d8" containerID="d48362f6e06662b68bd83681c5cc07deae98015cd24d261f648e0a740d52729c" exitCode=0
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.371861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" event={"ID":"bd800465-05ac-42c7-b1da-fa973f2571d8","Type":"ContainerDied","Data":"d48362f6e06662b68bd83681c5cc07deae98015cd24d261f648e0a740d52729c"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.371880 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" event={"ID":"bd800465-05ac-42c7-b1da-fa973f2571d8","Type":"ContainerStarted","Data":"802f14f49b9ab033717e3f761388715a34ed13d986706f08cc7fbb4b018325c8"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.374777 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-l5smw"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.374760 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-l5smw" event={"ID":"30b2a8d7-c2a4-462f-a93d-dc4b79f238bd","Type":"ContainerDied","Data":"aa81cac5861157ec500e02f8e364a1e11ffffe879cba5c91b8c36e583aa55643"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.374884 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa81cac5861157ec500e02f8e364a1e11ffffe879cba5c91b8c36e583aa55643"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.376224 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65d858fd7b-dbln9" event={"ID":"be43a059-c201-4bf3-92ac-304d58de4c02","Type":"ContainerStarted","Data":"6ac3e0c055131f84f050948ddc5f680dcdceafc6cce25235724219cbc66221e1"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.377627 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cc77dcc7b-4zbzz" event={"ID":"4f5c5403-4bbf-4a62-8e53-e0c774252e72","Type":"ContainerStarted","Data":"1742331f9d41135d334bd894ddb48235c0ceec5012de2e3b6099441b5178a47c"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.377652 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cc77dcc7b-4zbzz" event={"ID":"4f5c5403-4bbf-4a62-8e53-e0c774252e72","Type":"ContainerStarted","Data":"e5fb7963395fc3ff1d08da86dda88b7545f6ffc224be80efc471f5921347d059"}
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.727760 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6f4459f4df-b92xj"]
Nov 26 07:14:33 crc kubenswrapper[4940]: E1126 07:14:33.728239 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30b2a8d7-c2a4-462f-a93d-dc4b79f238bd" containerName="barbican-db-sync"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.728261 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="30b2a8d7-c2a4-462f-a93d-dc4b79f238bd" containerName="barbican-db-sync"
Nov 26 07:14:33 crc kubenswrapper[4940]: E1126 07:14:33.728280 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ffadde9-94cd-4f89-b270-e4a533f5399c" containerName="keystone-bootstrap"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.728288 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ffadde9-94cd-4f89-b270-e4a533f5399c" containerName="keystone-bootstrap"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.728500 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ffadde9-94cd-4f89-b270-e4a533f5399c" containerName="keystone-bootstrap"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.728523 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="30b2a8d7-c2a4-462f-a93d-dc4b79f238bd" containerName="barbican-db-sync"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.729213 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6f4459f4df-b92xj"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.731894 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.732265 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tgffk"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.732533 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.732730 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.732868 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.733012 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.739558 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6f4459f4df-b92xj"]
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.878937 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-fernet-keys\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.879002 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-internal-tls-certs\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.879219 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk2tn\" (UniqueName: \"kubernetes.io/projected/77513168-a1ea-4794-a859-b942b0e9c262-kube-api-access-fk2tn\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.879331 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-combined-ca-bundle\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.879451 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-credential-keys\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj"
Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.879535 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-public-tls-certs\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj"
\"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.879635 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-scripts\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.879708 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-config-data\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.961313 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7864499f6b-p77t2"] Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.966411 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.971769 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-8l299" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.971956 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.972096 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.983569 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-fernet-keys\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.983648 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-internal-tls-certs\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.983726 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk2tn\" (UniqueName: \"kubernetes.io/projected/77513168-a1ea-4794-a859-b942b0e9c262-kube-api-access-fk2tn\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.983819 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-combined-ca-bundle\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.983853 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-credential-keys\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.983890 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-public-tls-certs\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.983948 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-scripts\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.983998 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-config-data\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.993175 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-config-data\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.993782 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-combined-ca-bundle\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:33 crc kubenswrapper[4940]: I1126 07:14:33.997462 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-internal-tls-certs\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.010175 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6db949d4cf-kdv49"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.010837 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-public-tls-certs\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.011995 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.016269 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.023444 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-scripts\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.023670 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-credential-keys\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.025817 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-fernet-keys\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.040140 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7864499f6b-p77t2"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.049109 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6db949d4cf-kdv49"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.080661 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk2tn\" (UniqueName: \"kubernetes.io/projected/77513168-a1ea-4794-a859-b942b0e9c262-kube-api-access-fk2tn\") pod \"keystone-6f4459f4df-b92xj\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088592 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-combined-ca-bundle\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088640 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjzgt\" (UniqueName: \"kubernetes.io/projected/2952bb7b-f134-4f55-969b-e30cd8fbe53c-kube-api-access-kjzgt\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088663 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq6z2\" (UniqueName: \"kubernetes.io/projected/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-kube-api-access-sq6z2\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088692 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data-custom\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088720 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-combined-ca-bundle\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088738 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088767 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088805 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-logs\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088825 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data-custom\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.088877 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2952bb7b-f134-4f55-969b-e30cd8fbe53c-logs\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.103973 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c654c9745-wm9vz"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.131507 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cc67f459c-h94cl"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.134511 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.146924 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.164412 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc67f459c-h94cl"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201363 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-combined-ca-bundle\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201445 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201523 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201649 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-logs\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201696 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data-custom\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201796 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2952bb7b-f134-4f55-969b-e30cd8fbe53c-logs\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201846 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-combined-ca-bundle\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201885 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjzgt\" (UniqueName: \"kubernetes.io/projected/2952bb7b-f134-4f55-969b-e30cd8fbe53c-kube-api-access-kjzgt\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201931 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq6z2\" (UniqueName: \"kubernetes.io/projected/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-kube-api-access-sq6z2\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.201970 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data-custom\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.207309 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-logs\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.207575 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2952bb7b-f134-4f55-969b-e30cd8fbe53c-logs\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.211825 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.217713 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data-custom\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.218277 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.218855 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data-custom\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.222979 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjzgt\" (UniqueName: \"kubernetes.io/projected/2952bb7b-f134-4f55-969b-e30cd8fbe53c-kube-api-access-kjzgt\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 
07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.224188 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-combined-ca-bundle\") pod \"barbican-keystone-listener-7864499f6b-p77t2\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.228426 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-combined-ca-bundle\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.246148 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sq6z2\" (UniqueName: \"kubernetes.io/projected/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-kube-api-access-sq6z2\") pod \"barbican-worker-6db949d4cf-kdv49\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.271947 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6df8bfc666-vzchz"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.318215 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.326335 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-svc\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.326502 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-config\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.326639 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p4j7\" (UniqueName: \"kubernetes.io/projected/63f138a5-b797-4d75-b040-37af152cf338-kube-api-access-9p4j7\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.326802 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.326888 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " 
pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.327061 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.351369 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6df8bfc666-vzchz"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.351531 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.354845 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.430021 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.430106 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.430153 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.430188 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-svc\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.430224 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-config\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.430285 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p4j7\" (UniqueName: \"kubernetes.io/projected/63f138a5-b797-4d75-b040-37af152cf338-kube-api-access-9p4j7\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.431714 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.435934 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.436735 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.441014 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-config\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.441477 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-svc\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.447617 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7648b55b6f-h7txx" event={"ID":"e9325bed-7edc-41a3-a53c-fb5d147532f5","Type":"ContainerStarted","Data":"5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a"} Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.447663 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7648b55b6f-h7txx" event={"ID":"e9325bed-7edc-41a3-a53c-fb5d147532f5","Type":"ContainerStarted","Data":"8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb"} Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.448223 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.459784 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" event={"ID":"bd800465-05ac-42c7-b1da-fa973f2571d8","Type":"ContainerStarted","Data":"948ae20dc872c8c3fefd3bc118588d65fef313993762c1e17074897665e189fd"} Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.459985 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.464327 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65d858fd7b-dbln9" event={"ID":"be43a059-c201-4bf3-92ac-304d58de4c02","Type":"ContainerStarted","Data":"473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649"} Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.464383 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65d858fd7b-dbln9" 
event={"ID":"be43a059-c201-4bf3-92ac-304d58de4c02","Type":"ContainerStarted","Data":"26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860"} Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.465244 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.465264 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.466505 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.466537 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.467779 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cc77dcc7b-4zbzz" event={"ID":"4f5c5403-4bbf-4a62-8e53-e0c774252e72","Type":"ContainerStarted","Data":"0a70d4bdb0115f7a18ed21eeda87b03d3f5e7e0d25135b6b262a00eda3b5349f"} Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.468423 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.470398 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.479451 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p4j7\" (UniqueName: \"kubernetes.io/projected/63f138a5-b797-4d75-b040-37af152cf338-kube-api-access-9p4j7\") pod \"dnsmasq-dns-5cc67f459c-h94cl\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.488388 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7648b55b6f-h7txx" podStartSLOduration=4.488369662 podStartE2EDuration="4.488369662s" podCreationTimestamp="2025-11-26 07:14:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:34.479739868 +0000 UTC m=+1175.999881487" watchObservedRunningTime="2025-11-26 07:14:34.488369662 +0000 UTC m=+1176.008511281" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.511436 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" podStartSLOduration=6.511414763 podStartE2EDuration="6.511414763s" podCreationTimestamp="2025-11-26 07:14:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:34.497547423 +0000 UTC m=+1176.017689042" watchObservedRunningTime="2025-11-26 07:14:34.511414763 +0000 UTC m=+1176.031556382" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.529099 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-65d858fd7b-dbln9" podStartSLOduration=3.529081693 podStartE2EDuration="3.529081693s" podCreationTimestamp="2025-11-26 07:14:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:34.526292684 +0000 UTC 
m=+1176.046434303" watchObservedRunningTime="2025-11-26 07:14:34.529081693 +0000 UTC m=+1176.049223312" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.531454 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data-custom\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.531507 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-logs\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.531550 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.531604 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-combined-ca-bundle\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.531630 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rg4m2\" (UniqueName: \"kubernetes.io/projected/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-kube-api-access-rg4m2\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.544148 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.554442 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-cc77dcc7b-4zbzz" podStartSLOduration=6.554423987 podStartE2EDuration="6.554423987s" podCreationTimestamp="2025-11-26 07:14:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:34.551593157 +0000 UTC m=+1176.071734776" watchObservedRunningTime="2025-11-26 07:14:34.554423987 +0000 UTC m=+1176.074565606" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.568404 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.632994 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data-custom\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.633280 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-logs\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.633339 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.633387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-combined-ca-bundle\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.633415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg4m2\" (UniqueName: \"kubernetes.io/projected/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-kube-api-access-rg4m2\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.634112 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-logs\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.638986 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.639558 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-combined-ca-bundle\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.641687 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data-custom\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.658507 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.669537 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg4m2\" (UniqueName: \"kubernetes.io/projected/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-kube-api-access-rg4m2\") pod \"barbican-api-6df8bfc666-vzchz\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.700198 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.895995 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6f4459f4df-b92xj"] Nov 26 07:14:34 crc kubenswrapper[4940]: I1126 07:14:34.995009 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6db949d4cf-kdv49"] Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.122490 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7864499f6b-p77t2"] Nov 26 07:14:35 crc kubenswrapper[4940]: W1126 07:14:35.132182 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2952bb7b_f134_4f55_969b_e30cd8fbe53c.slice/crio-90fc2753869b4009b04c00b5efa8ee4e5f02f38c56a8cbea54f72dbe16c3696c WatchSource:0}: Error finding container 90fc2753869b4009b04c00b5efa8ee4e5f02f38c56a8cbea54f72dbe16c3696c: Status 404 returned error can't find the container with id 90fc2753869b4009b04c00b5efa8ee4e5f02f38c56a8cbea54f72dbe16c3696c Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.224702 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc67f459c-h94cl"] Nov 26 07:14:35 crc kubenswrapper[4940]: W1126 07:14:35.230561 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63f138a5_b797_4d75_b040_37af152cf338.slice/crio-e2a77f8cdd6ab851c3c41d19cfe12679f8e99dd3a98d7ca83cfd72e507329fcb WatchSource:0}: Error finding container e2a77f8cdd6ab851c3c41d19cfe12679f8e99dd3a98d7ca83cfd72e507329fcb: Status 404 returned error can't find the container with id e2a77f8cdd6ab851c3c41d19cfe12679f8e99dd3a98d7ca83cfd72e507329fcb Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.353240 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6df8bfc666-vzchz"] Nov 26 07:14:35 crc kubenswrapper[4940]: W1126 07:14:35.369442 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1c4a097_5a02_4edd_8d1a_f3fc9fb90555.slice/crio-bee1c9e1343584c7cde70a1d330197db86ef5c46530b2a22957eddd3f1402efb WatchSource:0}: Error finding container bee1c9e1343584c7cde70a1d330197db86ef5c46530b2a22957eddd3f1402efb: Status 404 returned error can't find the container with id bee1c9e1343584c7cde70a1d330197db86ef5c46530b2a22957eddd3f1402efb Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.499647 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" event={"ID":"63f138a5-b797-4d75-b040-37af152cf338","Type":"ContainerStarted","Data":"e2a77f8cdd6ab851c3c41d19cfe12679f8e99dd3a98d7ca83cfd72e507329fcb"} Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.508133 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-6df8bfc666-vzchz" event={"ID":"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555","Type":"ContainerStarted","Data":"bee1c9e1343584c7cde70a1d330197db86ef5c46530b2a22957eddd3f1402efb"} Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.510593 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" event={"ID":"2952bb7b-f134-4f55-969b-e30cd8fbe53c","Type":"ContainerStarted","Data":"90fc2753869b4009b04c00b5efa8ee4e5f02f38c56a8cbea54f72dbe16c3696c"} Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.515576 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6db949d4cf-kdv49" event={"ID":"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16","Type":"ContainerStarted","Data":"a19eca65076752b981a7e05d304ffa4d8d838c9ceecd139bca4ceb2e7c58740e"} Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.522153 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6f4459f4df-b92xj" event={"ID":"77513168-a1ea-4794-a859-b942b0e9c262","Type":"ContainerStarted","Data":"91bd7e89a1d7d0eb89a61cc1bbe9824e672e433b9e8a1ae66449fbaea8335372"} Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.522205 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6f4459f4df-b92xj" event={"ID":"77513168-a1ea-4794-a859-b942b0e9c262","Type":"ContainerStarted","Data":"2b4d396f8d23b9de4d00bef84e943afc398b45a0c8d76c59da504b4ad6b7a480"} Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.524129 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" podUID="bd800465-05ac-42c7-b1da-fa973f2571d8" containerName="dnsmasq-dns" containerID="cri-o://948ae20dc872c8c3fefd3bc118588d65fef313993762c1e17074897665e189fd" gracePeriod=10 Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.524160 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.524265 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 07:14:35 crc kubenswrapper[4940]: I1126 07:14:35.569833 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6f4459f4df-b92xj" podStartSLOduration=2.5698009600000002 podStartE2EDuration="2.56980096s" podCreationTimestamp="2025-11-26 07:14:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:35.556741786 +0000 UTC m=+1177.076883405" watchObservedRunningTime="2025-11-26 07:14:35.56980096 +0000 UTC m=+1177.089942579" Nov 26 07:14:35 crc kubenswrapper[4940]: E1126 07:14:35.806665 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd800465_05ac_42c7_b1da_fa973f2571d8.slice/crio-conmon-948ae20dc872c8c3fefd3bc118588d65fef313993762c1e17074897665e189fd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63f138a5_b797_4d75_b040_37af152cf338.slice/crio-a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd800465_05ac_42c7_b1da_fa973f2571d8.slice/crio-948ae20dc872c8c3fefd3bc118588d65fef313993762c1e17074897665e189fd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63f138a5_b797_4d75_b040_37af152cf338.slice/crio-conmon-a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc.scope\": RecentStats: unable to find data in memory cache]" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.520033 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.520359 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.534727 4940 generic.go:334] "Generic (PLEG): container finished" podID="63f138a5-b797-4d75-b040-37af152cf338" containerID="a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc" exitCode=0 Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.534784 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" event={"ID":"63f138a5-b797-4d75-b040-37af152cf338","Type":"ContainerDied","Data":"a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc"} Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.540246 4940 generic.go:334] "Generic (PLEG): container finished" podID="bd800465-05ac-42c7-b1da-fa973f2571d8" containerID="948ae20dc872c8c3fefd3bc118588d65fef313993762c1e17074897665e189fd" exitCode=0 Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.540317 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" event={"ID":"bd800465-05ac-42c7-b1da-fa973f2571d8","Type":"ContainerDied","Data":"948ae20dc872c8c3fefd3bc118588d65fef313993762c1e17074897665e189fd"} Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.546445 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6df8bfc666-vzchz" event={"ID":"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555","Type":"ContainerStarted","Data":"c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450"} Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.547179 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.581739 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.582439 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.615432 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.904607 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6774864d76-mfv42"] Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.906243 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.910912 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.911259 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 26 07:14:36 crc kubenswrapper[4940]: I1126 07:14:36.913366 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6774864d76-mfv42"] Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.028588 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.028929 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35c6bf07-4770-450e-a19a-02323913fcd4-logs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.028961 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-combined-ca-bundle\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.029015 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bp49\" (UniqueName: \"kubernetes.io/projected/35c6bf07-4770-450e-a19a-02323913fcd4-kube-api-access-2bp49\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.029064 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-public-tls-certs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.029112 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data-custom\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.029156 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-internal-tls-certs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.130379 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35c6bf07-4770-450e-a19a-02323913fcd4-logs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.130437 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-combined-ca-bundle\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.130502 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bp49\" (UniqueName: \"kubernetes.io/projected/35c6bf07-4770-450e-a19a-02323913fcd4-kube-api-access-2bp49\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.130533 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-public-tls-certs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.130570 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data-custom\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.130605 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-internal-tls-certs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.130650 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.132118 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35c6bf07-4770-450e-a19a-02323913fcd4-logs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.136990 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-public-tls-certs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.137495 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-internal-tls-certs\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.138346 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.148570 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-combined-ca-bundle\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.153723 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data-custom\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.160124 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bp49\" (UniqueName: \"kubernetes.io/projected/35c6bf07-4770-450e-a19a-02323913fcd4-kube-api-access-2bp49\") pod \"barbican-api-6774864d76-mfv42\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.230456 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.587869 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6df8bfc666-vzchz" event={"ID":"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555","Type":"ContainerStarted","Data":"ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca"} Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.588664 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.588708 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.624503 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6df8bfc666-vzchz" podStartSLOduration=3.624484756 podStartE2EDuration="3.624484756s" podCreationTimestamp="2025-11-26 07:14:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:37.62179198 +0000 UTC m=+1179.141933609" watchObservedRunningTime="2025-11-26 07:14:37.624484756 +0000 UTC m=+1179.144626375" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.636689 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" event={"ID":"63f138a5-b797-4d75-b040-37af152cf338","Type":"ContainerStarted","Data":"db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4"} Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.636765 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.636786 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.638022 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.638072 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.703288 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" podStartSLOduration=3.703263674 podStartE2EDuration="3.703263674s" podCreationTimestamp="2025-11-26 07:14:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:37.689779256 +0000 UTC m=+1179.209920895" watchObservedRunningTime="2025-11-26 07:14:37.703263674 +0000 UTC m=+1179.223405293" Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.737004 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6774864d76-mfv42"] Nov 26 07:14:37 crc kubenswrapper[4940]: I1126 07:14:37.970487 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.049004 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-config\") pod \"bd800465-05ac-42c7-b1da-fa973f2571d8\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.049113 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vktrx\" (UniqueName: \"kubernetes.io/projected/bd800465-05ac-42c7-b1da-fa973f2571d8-kube-api-access-vktrx\") pod \"bd800465-05ac-42c7-b1da-fa973f2571d8\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.049159 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-swift-storage-0\") pod \"bd800465-05ac-42c7-b1da-fa973f2571d8\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.049255 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-nb\") pod \"bd800465-05ac-42c7-b1da-fa973f2571d8\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.049301 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-svc\") pod \"bd800465-05ac-42c7-b1da-fa973f2571d8\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.049475 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-sb\") pod \"bd800465-05ac-42c7-b1da-fa973f2571d8\" (UID: \"bd800465-05ac-42c7-b1da-fa973f2571d8\") " Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.059479 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd800465-05ac-42c7-b1da-fa973f2571d8-kube-api-access-vktrx" (OuterVolumeSpecName: "kube-api-access-vktrx") pod "bd800465-05ac-42c7-b1da-fa973f2571d8" (UID: "bd800465-05ac-42c7-b1da-fa973f2571d8"). InnerVolumeSpecName "kube-api-access-vktrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.135513 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bd800465-05ac-42c7-b1da-fa973f2571d8" (UID: "bd800465-05ac-42c7-b1da-fa973f2571d8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.141107 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-config" (OuterVolumeSpecName: "config") pod "bd800465-05ac-42c7-b1da-fa973f2571d8" (UID: "bd800465-05ac-42c7-b1da-fa973f2571d8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.151657 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.151685 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vktrx\" (UniqueName: \"kubernetes.io/projected/bd800465-05ac-42c7-b1da-fa973f2571d8-kube-api-access-vktrx\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.151698 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.158422 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bd800465-05ac-42c7-b1da-fa973f2571d8" (UID: "bd800465-05ac-42c7-b1da-fa973f2571d8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.165988 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bd800465-05ac-42c7-b1da-fa973f2571d8" (UID: "bd800465-05ac-42c7-b1da-fa973f2571d8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.174582 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bd800465-05ac-42c7-b1da-fa973f2571d8" (UID: "bd800465-05ac-42c7-b1da-fa973f2571d8"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.253396 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.253429 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.253439 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd800465-05ac-42c7-b1da-fa973f2571d8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.438240 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.690178 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6774864d76-mfv42" event={"ID":"35c6bf07-4770-450e-a19a-02323913fcd4","Type":"ContainerStarted","Data":"38ebe7a40f2302221b977bb730a4dafa3b829dbc717ce7001d421d16b83d3667"} Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.690243 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6774864d76-mfv42" event={"ID":"35c6bf07-4770-450e-a19a-02323913fcd4","Type":"ContainerStarted","Data":"af6d07c01a8f45e7b8f9c9d2cebf33c57841077f225e3827093818e8a7e717c3"} Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.690259 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6774864d76-mfv42" event={"ID":"35c6bf07-4770-450e-a19a-02323913fcd4","Type":"ContainerStarted","Data":"72c8c6f4510b5b0a577fe2f7a8284d7c6465608e72797f12cc46ae0118ece23b"} Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.690338 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.710553 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" event={"ID":"bd800465-05ac-42c7-b1da-fa973f2571d8","Type":"ContainerDied","Data":"802f14f49b9ab033717e3f761388715a34ed13d986706f08cc7fbb4b018325c8"} Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.711237 4940 scope.go:117] "RemoveContainer" containerID="948ae20dc872c8c3fefd3bc118588d65fef313993762c1e17074897665e189fd" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.710594 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c654c9745-wm9vz" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.722791 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.723021 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-wkhpm" event={"ID":"40a16b36-c3e9-4537-bf10-89b685489f39","Type":"ContainerStarted","Data":"cbd7374187aae825ed18d1f8c6f4abf7a3f9464fa401bca64846ddf14a6bf493"} Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.724872 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.732241 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6774864d76-mfv42" podStartSLOduration=2.732143695 podStartE2EDuration="2.732143695s" podCreationTimestamp="2025-11-26 07:14:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:38.716469589 +0000 UTC m=+1180.236611228" watchObservedRunningTime="2025-11-26 07:14:38.732143695 +0000 UTC m=+1180.252285314" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.765366 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-wkhpm" podStartSLOduration=2.420157614 podStartE2EDuration="36.765345348s" podCreationTimestamp="2025-11-26 07:14:02 +0000 UTC" firstStartedPulling="2025-11-26 07:14:03.375646867 +0000 UTC m=+1144.895788486" lastFinishedPulling="2025-11-26 07:14:37.720834601 +0000 UTC m=+1179.240976220" observedRunningTime="2025-11-26 07:14:38.741915806 +0000 UTC m=+1180.262057425" watchObservedRunningTime="2025-11-26 07:14:38.765345348 +0000 UTC m=+1180.285486967" Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.830141 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c654c9745-wm9vz"] Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.838157 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c654c9745-wm9vz"] Nov 26 07:14:38 crc kubenswrapper[4940]: I1126 07:14:38.872017 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 07:14:39 crc kubenswrapper[4940]: I1126 07:14:39.099847 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:39 crc kubenswrapper[4940]: I1126 07:14:39.182721 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd800465-05ac-42c7-b1da-fa973f2571d8" path="/var/lib/kubelet/pods/bd800465-05ac-42c7-b1da-fa973f2571d8/volumes" Nov 26 07:14:39 crc kubenswrapper[4940]: I1126 07:14:39.731757 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:14:39 crc kubenswrapper[4940]: I1126 07:14:39.732652 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:40 crc kubenswrapper[4940]: I1126 07:14:40.208172 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 07:14:42 crc kubenswrapper[4940]: I1126 07:14:42.546222 4940 scope.go:117] "RemoveContainer" containerID="d48362f6e06662b68bd83681c5cc07deae98015cd24d261f648e0a740d52729c" Nov 26 07:14:43 crc kubenswrapper[4940]: I1126 07:14:43.771567 4940 
generic.go:334] "Generic (PLEG): container finished" podID="40a16b36-c3e9-4537-bf10-89b685489f39" containerID="cbd7374187aae825ed18d1f8c6f4abf7a3f9464fa401bca64846ddf14a6bf493" exitCode=0 Nov 26 07:14:43 crc kubenswrapper[4940]: I1126 07:14:43.771659 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-wkhpm" event={"ID":"40a16b36-c3e9-4537-bf10-89b685489f39","Type":"ContainerDied","Data":"cbd7374187aae825ed18d1f8c6f4abf7a3f9464fa401bca64846ddf14a6bf493"} Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.661834 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.787559 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76c58b6d97-6b6rc"] Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.788740 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" podUID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" containerName="dnsmasq-dns" containerID="cri-o://63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc" gracePeriod=10 Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.808292 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" event={"ID":"2952bb7b-f134-4f55-969b-e30cd8fbe53c","Type":"ContainerStarted","Data":"6ec52072d9ea19666daec0c37b2fb57fbc8d500a0f552cf60f1fb0ea6a25ed06"} Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.808345 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" event={"ID":"2952bb7b-f134-4f55-969b-e30cd8fbe53c","Type":"ContainerStarted","Data":"4bf744e80f9468639290e0122be55a99e227ce639f923bfca4bfba0a9c767cde"} Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.816583 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6db949d4cf-kdv49" event={"ID":"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16","Type":"ContainerStarted","Data":"b2273f1e55c5a69eb78cfb44375ac989afcca3b119c94705b8c9afb07c4525d3"} Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.816635 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6db949d4cf-kdv49" event={"ID":"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16","Type":"ContainerStarted","Data":"68f96383a43cb6661fc9011d58327bf045a794abb89b6d697dff06c631c6488d"} Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.826579 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="ceilometer-central-agent" containerID="cri-o://c371286dd54e7a4ea39d1cf40298d2fd48e36ed978e9d3f1d34183d5e0e6d5d0" gracePeriod=30 Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.826677 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerStarted","Data":"39e459f180b21d710ef03415c9d778a92e663099bf51bb9fd2ae81d65d7e1c8f"} Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.826720 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.826760 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="proxy-httpd" 
containerID="cri-o://39e459f180b21d710ef03415c9d778a92e663099bf51bb9fd2ae81d65d7e1c8f" gracePeriod=30 Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.826809 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="sg-core" containerID="cri-o://23beb4790897ba6bf03c51eee0cac936857323ad9c782af67906d0ee81b24dc0" gracePeriod=30 Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.826851 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="ceilometer-notification-agent" containerID="cri-o://3f826e94f4b99a33ae2fbb54cec7a37b25ae6126d27f6602e2d190853c003c6a" gracePeriod=30 Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.869232 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" podStartSLOduration=3.449698687 podStartE2EDuration="11.869207755s" podCreationTimestamp="2025-11-26 07:14:33 +0000 UTC" firstStartedPulling="2025-11-26 07:14:35.135350851 +0000 UTC m=+1176.655492460" lastFinishedPulling="2025-11-26 07:14:43.554859909 +0000 UTC m=+1185.075001528" observedRunningTime="2025-11-26 07:14:44.855837041 +0000 UTC m=+1186.375978670" watchObservedRunningTime="2025-11-26 07:14:44.869207755 +0000 UTC m=+1186.389349374" Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.923005 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.571426982 podStartE2EDuration="42.922986971s" podCreationTimestamp="2025-11-26 07:14:02 +0000 UTC" firstStartedPulling="2025-11-26 07:14:03.302428515 +0000 UTC m=+1144.822570134" lastFinishedPulling="2025-11-26 07:14:43.653988504 +0000 UTC m=+1185.174130123" observedRunningTime="2025-11-26 07:14:44.894611011 +0000 UTC m=+1186.414752630" watchObservedRunningTime="2025-11-26 07:14:44.922986971 +0000 UTC m=+1186.443128600" Nov 26 07:14:44 crc kubenswrapper[4940]: I1126 07:14:44.937030 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6db949d4cf-kdv49" podStartSLOduration=3.392455221 podStartE2EDuration="11.937006745s" podCreationTimestamp="2025-11-26 07:14:33 +0000 UTC" firstStartedPulling="2025-11-26 07:14:35.009316174 +0000 UTC m=+1176.529457793" lastFinishedPulling="2025-11-26 07:14:43.553867708 +0000 UTC m=+1185.074009317" observedRunningTime="2025-11-26 07:14:44.917510257 +0000 UTC m=+1186.437651886" watchObservedRunningTime="2025-11-26 07:14:44.937006745 +0000 UTC m=+1186.457148364" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.340187 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.490429 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-combined-ca-bundle\") pod \"40a16b36-c3e9-4537-bf10-89b685489f39\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.490530 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-db-sync-config-data\") pod \"40a16b36-c3e9-4537-bf10-89b685489f39\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.490639 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr6xx\" (UniqueName: \"kubernetes.io/projected/40a16b36-c3e9-4537-bf10-89b685489f39-kube-api-access-pr6xx\") pod \"40a16b36-c3e9-4537-bf10-89b685489f39\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.490674 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-config-data\") pod \"40a16b36-c3e9-4537-bf10-89b685489f39\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.490695 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40a16b36-c3e9-4537-bf10-89b685489f39-etc-machine-id\") pod \"40a16b36-c3e9-4537-bf10-89b685489f39\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.490725 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-scripts\") pod \"40a16b36-c3e9-4537-bf10-89b685489f39\" (UID: \"40a16b36-c3e9-4537-bf10-89b685489f39\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.494233 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/40a16b36-c3e9-4537-bf10-89b685489f39-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "40a16b36-c3e9-4537-bf10-89b685489f39" (UID: "40a16b36-c3e9-4537-bf10-89b685489f39"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.497293 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "40a16b36-c3e9-4537-bf10-89b685489f39" (UID: "40a16b36-c3e9-4537-bf10-89b685489f39"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.499140 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40a16b36-c3e9-4537-bf10-89b685489f39-kube-api-access-pr6xx" (OuterVolumeSpecName: "kube-api-access-pr6xx") pod "40a16b36-c3e9-4537-bf10-89b685489f39" (UID: "40a16b36-c3e9-4537-bf10-89b685489f39"). InnerVolumeSpecName "kube-api-access-pr6xx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.499229 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-scripts" (OuterVolumeSpecName: "scripts") pod "40a16b36-c3e9-4537-bf10-89b685489f39" (UID: "40a16b36-c3e9-4537-bf10-89b685489f39"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.532175 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40a16b36-c3e9-4537-bf10-89b685489f39" (UID: "40a16b36-c3e9-4537-bf10-89b685489f39"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.550347 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-config-data" (OuterVolumeSpecName: "config-data") pod "40a16b36-c3e9-4537-bf10-89b685489f39" (UID: "40a16b36-c3e9-4537-bf10-89b685489f39"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.592613 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.592654 4940 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.592666 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr6xx\" (UniqueName: \"kubernetes.io/projected/40a16b36-c3e9-4537-bf10-89b685489f39-kube-api-access-pr6xx\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.592680 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.592689 4940 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40a16b36-c3e9-4537-bf10-89b685489f39-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.592696 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40a16b36-c3e9-4537-bf10-89b685489f39-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.731134 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.840067 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-wkhpm" event={"ID":"40a16b36-c3e9-4537-bf10-89b685489f39","Type":"ContainerDied","Data":"0a6440478e44b1206a6a11d239969e3aeb83818072d60c501d7043fabc7e792a"} Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.840350 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a6440478e44b1206a6a11d239969e3aeb83818072d60c501d7043fabc7e792a" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.840448 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-wkhpm" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.852578 4940 generic.go:334] "Generic (PLEG): container finished" podID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" containerID="63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc" exitCode=0 Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.852692 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" event={"ID":"0f3bbf96-4fe3-4933-9d70-7328de80dddc","Type":"ContainerDied","Data":"63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc"} Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.852726 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" event={"ID":"0f3bbf96-4fe3-4933-9d70-7328de80dddc","Type":"ContainerDied","Data":"fa7a3a369b37264955dad15502946b76694aa613ddd55a1d66b3add04be6121a"} Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.852747 4940 scope.go:117] "RemoveContainer" containerID="63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.852934 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76c58b6d97-6b6rc" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.863074 4940 generic.go:334] "Generic (PLEG): container finished" podID="c065108a-6b6e-4257-a44e-15182224f721" containerID="39e459f180b21d710ef03415c9d778a92e663099bf51bb9fd2ae81d65d7e1c8f" exitCode=0 Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.863128 4940 generic.go:334] "Generic (PLEG): container finished" podID="c065108a-6b6e-4257-a44e-15182224f721" containerID="23beb4790897ba6bf03c51eee0cac936857323ad9c782af67906d0ee81b24dc0" exitCode=2 Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.863140 4940 generic.go:334] "Generic (PLEG): container finished" podID="c065108a-6b6e-4257-a44e-15182224f721" containerID="c371286dd54e7a4ea39d1cf40298d2fd48e36ed978e9d3f1d34183d5e0e6d5d0" exitCode=0 Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.863383 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerDied","Data":"39e459f180b21d710ef03415c9d778a92e663099bf51bb9fd2ae81d65d7e1c8f"} Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.863429 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerDied","Data":"23beb4790897ba6bf03c51eee0cac936857323ad9c782af67906d0ee81b24dc0"} Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.863445 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerDied","Data":"c371286dd54e7a4ea39d1cf40298d2fd48e36ed978e9d3f1d34183d5e0e6d5d0"} Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.898149 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfvxb\" (UniqueName: \"kubernetes.io/projected/0f3bbf96-4fe3-4933-9d70-7328de80dddc-kube-api-access-pfvxb\") pod \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.898210 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-config\") pod \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.898245 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-nb\") pod \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.898295 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-sb\") pod \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.898375 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-swift-storage-0\") pod \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 
07:14:45.898424 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-svc\") pod \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\" (UID: \"0f3bbf96-4fe3-4933-9d70-7328de80dddc\") " Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.903840 4940 scope.go:117] "RemoveContainer" containerID="3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.906222 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f3bbf96-4fe3-4933-9d70-7328de80dddc-kube-api-access-pfvxb" (OuterVolumeSpecName: "kube-api-access-pfvxb") pod "0f3bbf96-4fe3-4933-9d70-7328de80dddc" (UID: "0f3bbf96-4fe3-4933-9d70-7328de80dddc"). InnerVolumeSpecName "kube-api-access-pfvxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.944022 4940 scope.go:117] "RemoveContainer" containerID="63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc" Nov 26 07:14:45 crc kubenswrapper[4940]: E1126 07:14:45.945440 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc\": container with ID starting with 63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc not found: ID does not exist" containerID="63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.945484 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc"} err="failed to get container status \"63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc\": rpc error: code = NotFound desc = could not find container \"63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc\": container with ID starting with 63bc5eb189447fbd6875fb9c6988eb0274b52fee3cf5237738bd051c24f36bfc not found: ID does not exist" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.945512 4940 scope.go:117] "RemoveContainer" containerID="3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b" Nov 26 07:14:45 crc kubenswrapper[4940]: E1126 07:14:45.946313 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b\": container with ID starting with 3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b not found: ID does not exist" containerID="3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.946425 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b"} err="failed to get container status \"3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b\": rpc error: code = NotFound desc = could not find container \"3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b\": container with ID starting with 3593df4d96313f1ef3957710bf13962713564631eba0c8e32241589e8211583b not found: ID does not exist" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.949106 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0f3bbf96-4fe3-4933-9d70-7328de80dddc" (UID: "0f3bbf96-4fe3-4933-9d70-7328de80dddc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.959759 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0f3bbf96-4fe3-4933-9d70-7328de80dddc" (UID: "0f3bbf96-4fe3-4933-9d70-7328de80dddc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.972091 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0f3bbf96-4fe3-4933-9d70-7328de80dddc" (UID: "0f3bbf96-4fe3-4933-9d70-7328de80dddc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.976564 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-config" (OuterVolumeSpecName: "config") pod "0f3bbf96-4fe3-4933-9d70-7328de80dddc" (UID: "0f3bbf96-4fe3-4933-9d70-7328de80dddc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:45 crc kubenswrapper[4940]: I1126 07:14:45.979459 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0f3bbf96-4fe3-4933-9d70-7328de80dddc" (UID: "0f3bbf96-4fe3-4933-9d70-7328de80dddc"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.001342 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.001374 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.001384 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.001393 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfvxb\" (UniqueName: \"kubernetes.io/projected/0f3bbf96-4fe3-4933-9d70-7328de80dddc-kube-api-access-pfvxb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.001406 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.001413 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f3bbf96-4fe3-4933-9d70-7328de80dddc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.100236 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:14:46 crc kubenswrapper[4940]: E1126 07:14:46.100679 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd800465-05ac-42c7-b1da-fa973f2571d8" containerName="dnsmasq-dns" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.100703 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd800465-05ac-42c7-b1da-fa973f2571d8" containerName="dnsmasq-dns" Nov 26 07:14:46 crc kubenswrapper[4940]: E1126 07:14:46.100731 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" containerName="init" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.100741 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" containerName="init" Nov 26 07:14:46 crc kubenswrapper[4940]: E1126 07:14:46.100759 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd800465-05ac-42c7-b1da-fa973f2571d8" containerName="init" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.100767 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd800465-05ac-42c7-b1da-fa973f2571d8" containerName="init" Nov 26 07:14:46 crc kubenswrapper[4940]: E1126 07:14:46.100777 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" containerName="dnsmasq-dns" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.100784 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" containerName="dnsmasq-dns" Nov 26 07:14:46 crc kubenswrapper[4940]: E1126 07:14:46.100805 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40a16b36-c3e9-4537-bf10-89b685489f39" containerName="cinder-db-sync" Nov 26 
07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.100813 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="40a16b36-c3e9-4537-bf10-89b685489f39" containerName="cinder-db-sync" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.101013 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" containerName="dnsmasq-dns" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.101026 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="40a16b36-c3e9-4537-bf10-89b685489f39" containerName="cinder-db-sync" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.101099 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd800465-05ac-42c7-b1da-fa973f2571d8" containerName="dnsmasq-dns" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.102269 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.114745 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.121451 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.121546 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-m5df9" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.121775 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.121946 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.128201 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-797bbc649-7w2t6"] Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.130466 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.156969 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-797bbc649-7w2t6"] Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.206902 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-scripts\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.206953 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb8lw\" (UniqueName: \"kubernetes.io/projected/2a14eebe-34a0-4cba-ad95-6005882bc37f-kube-api-access-rb8lw\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.206978 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2a14eebe-34a0-4cba-ad95-6005882bc37f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.207017 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.214257 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.214402 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.251299 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76c58b6d97-6b6rc"] Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.268929 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76c58b6d97-6b6rc"] Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.296633 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.298402 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.304354 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316166 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhjcx\" (UniqueName: \"kubernetes.io/projected/54601667-b13d-49f2-8822-9a8c027da9ab-kube-api-access-hhjcx\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316203 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-nb\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316230 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316266 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-config\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316284 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-swift-storage-0\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316299 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-sb\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316327 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316348 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-scripts\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316368 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb8lw\" (UniqueName: 
\"kubernetes.io/projected/2a14eebe-34a0-4cba-ad95-6005882bc37f-kube-api-access-rb8lw\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316392 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2a14eebe-34a0-4cba-ad95-6005882bc37f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316419 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.316445 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-svc\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.322876 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2a14eebe-34a0-4cba-ad95-6005882bc37f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.330624 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.331503 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.339168 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.343747 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-scripts\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.353929 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data\") pod \"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.354604 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb8lw\" (UniqueName: \"kubernetes.io/projected/2a14eebe-34a0-4cba-ad95-6005882bc37f-kube-api-access-rb8lw\") pod 
\"cinder-scheduler-0\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417514 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/04ef468e-3901-495b-842c-34163260f82d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417573 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhjcx\" (UniqueName: \"kubernetes.io/projected/54601667-b13d-49f2-8822-9a8c027da9ab-kube-api-access-hhjcx\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417600 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-nb\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417645 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-config\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417663 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-swift-storage-0\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417677 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-sb\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417699 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9xvj\" (UniqueName: \"kubernetes.io/projected/04ef468e-3901-495b-842c-34163260f82d-kube-api-access-z9xvj\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417717 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417744 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " 
pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417765 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-scripts\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417783 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data-custom\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417802 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-svc\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.417836 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04ef468e-3901-495b-842c-34163260f82d-logs\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.418953 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-nb\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.419448 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-config\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.420115 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-swift-storage-0\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.420712 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-sb\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.433520 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-svc\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.435367 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.470695 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhjcx\" (UniqueName: \"kubernetes.io/projected/54601667-b13d-49f2-8822-9a8c027da9ab-kube-api-access-hhjcx\") pod \"dnsmasq-dns-797bbc649-7w2t6\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.496563 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.521255 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04ef468e-3901-495b-842c-34163260f82d-logs\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.521391 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/04ef468e-3901-495b-842c-34163260f82d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.521473 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9xvj\" (UniqueName: \"kubernetes.io/projected/04ef468e-3901-495b-842c-34163260f82d-kube-api-access-z9xvj\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.521496 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.521540 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.521561 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-scripts\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.521575 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data-custom\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.522664 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04ef468e-3901-495b-842c-34163260f82d-logs\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.523503 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/04ef468e-3901-495b-842c-34163260f82d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.527539 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.531174 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-scripts\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.531387 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data-custom\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.545602 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.551917 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9xvj\" (UniqueName: \"kubernetes.io/projected/04ef468e-3901-495b-842c-34163260f82d-kube-api-access-z9xvj\") pod \"cinder-api-0\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.622423 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.883582 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.958457 4940 generic.go:334] "Generic (PLEG): container finished" podID="c065108a-6b6e-4257-a44e-15182224f721" containerID="3f826e94f4b99a33ae2fbb54cec7a37b25ae6126d27f6602e2d190853c003c6a" exitCode=0 Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.958828 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerDied","Data":"3f826e94f4b99a33ae2fbb54cec7a37b25ae6126d27f6602e2d190853c003c6a"} Nov 26 07:14:46 crc kubenswrapper[4940]: I1126 07:14:46.982370 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.085135 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.200524 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f3bbf96-4fe3-4933-9d70-7328de80dddc" path="/var/lib/kubelet/pods/0f3bbf96-4fe3-4933-9d70-7328de80dddc/volumes" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.391276 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-797bbc649-7w2t6"] Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.533928 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.615518 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.748314 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vdz8\" (UniqueName: \"kubernetes.io/projected/c065108a-6b6e-4257-a44e-15182224f721-kube-api-access-4vdz8\") pod \"c065108a-6b6e-4257-a44e-15182224f721\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.748403 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-scripts\") pod \"c065108a-6b6e-4257-a44e-15182224f721\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.748425 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-combined-ca-bundle\") pod \"c065108a-6b6e-4257-a44e-15182224f721\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.748457 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-log-httpd\") pod \"c065108a-6b6e-4257-a44e-15182224f721\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.748515 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-config-data\") pod \"c065108a-6b6e-4257-a44e-15182224f721\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.748536 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-sg-core-conf-yaml\") pod \"c065108a-6b6e-4257-a44e-15182224f721\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.748626 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-run-httpd\") pod \"c065108a-6b6e-4257-a44e-15182224f721\" (UID: \"c065108a-6b6e-4257-a44e-15182224f721\") " Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.749640 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c065108a-6b6e-4257-a44e-15182224f721" (UID: "c065108a-6b6e-4257-a44e-15182224f721"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.753377 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c065108a-6b6e-4257-a44e-15182224f721" (UID: "c065108a-6b6e-4257-a44e-15182224f721"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.758811 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-scripts" (OuterVolumeSpecName: "scripts") pod "c065108a-6b6e-4257-a44e-15182224f721" (UID: "c065108a-6b6e-4257-a44e-15182224f721"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.761643 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c065108a-6b6e-4257-a44e-15182224f721-kube-api-access-4vdz8" (OuterVolumeSpecName: "kube-api-access-4vdz8") pod "c065108a-6b6e-4257-a44e-15182224f721" (UID: "c065108a-6b6e-4257-a44e-15182224f721"). InnerVolumeSpecName "kube-api-access-4vdz8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.782254 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c065108a-6b6e-4257-a44e-15182224f721" (UID: "c065108a-6b6e-4257-a44e-15182224f721"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.851228 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.851697 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.851778 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vdz8\" (UniqueName: \"kubernetes.io/projected/c065108a-6b6e-4257-a44e-15182224f721-kube-api-access-4vdz8\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.851833 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.851884 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c065108a-6b6e-4257-a44e-15182224f721-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.863344 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c065108a-6b6e-4257-a44e-15182224f721" (UID: "c065108a-6b6e-4257-a44e-15182224f721"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.908480 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-config-data" (OuterVolumeSpecName: "config-data") pod "c065108a-6b6e-4257-a44e-15182224f721" (UID: "c065108a-6b6e-4257-a44e-15182224f721"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.953242 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.953285 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c065108a-6b6e-4257-a44e-15182224f721-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.969144 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2a14eebe-34a0-4cba-ad95-6005882bc37f","Type":"ContainerStarted","Data":"750248bab9ed1f90fbaf684a0ed64f6db848e1f81befc84b7243b6d6ca08b4dc"} Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.970362 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"04ef468e-3901-495b-842c-34163260f82d","Type":"ContainerStarted","Data":"ee4019f5951f8e97665d9fba8097e9523aede489afeda3d20fcf45c1cc0dbf83"} Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.971978 4940 generic.go:334] "Generic (PLEG): container finished" podID="54601667-b13d-49f2-8822-9a8c027da9ab" containerID="4f89de5a9c0ac53d4f1cb615a14cdae9084088d4bd6ad5281f6799ca7a6af6e5" exitCode=0 Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.972050 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" event={"ID":"54601667-b13d-49f2-8822-9a8c027da9ab","Type":"ContainerDied","Data":"4f89de5a9c0ac53d4f1cb615a14cdae9084088d4bd6ad5281f6799ca7a6af6e5"} Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.972067 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" event={"ID":"54601667-b13d-49f2-8822-9a8c027da9ab","Type":"ContainerStarted","Data":"06f683e18b2a1c557d06f4ff29d6f71e08bc0fb3ce34b94834d533aace5305c5"} Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.976305 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.976610 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c065108a-6b6e-4257-a44e-15182224f721","Type":"ContainerDied","Data":"4c49fa62184046c774b2fdef3338ace3669890863959bfb3d7acecfa30e34811"} Nov 26 07:14:47 crc kubenswrapper[4940]: I1126 07:14:47.976658 4940 scope.go:117] "RemoveContainer" containerID="39e459f180b21d710ef03415c9d778a92e663099bf51bb9fd2ae81d65d7e1c8f" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.031184 4940 scope.go:117] "RemoveContainer" containerID="23beb4790897ba6bf03c51eee0cac936857323ad9c782af67906d0ee81b24dc0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.035113 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.054507 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.068759 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:48 crc kubenswrapper[4940]: E1126 07:14:48.069116 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="ceilometer-notification-agent" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.069132 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="ceilometer-notification-agent" Nov 26 07:14:48 crc kubenswrapper[4940]: E1126 07:14:48.069155 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="proxy-httpd" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.069162 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="proxy-httpd" Nov 26 07:14:48 crc kubenswrapper[4940]: E1126 07:14:48.069178 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="ceilometer-central-agent" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.069184 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="ceilometer-central-agent" Nov 26 07:14:48 crc kubenswrapper[4940]: E1126 07:14:48.069198 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="sg-core" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.069204 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="sg-core" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.069363 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="ceilometer-central-agent" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.069375 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="ceilometer-notification-agent" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.069399 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="proxy-httpd" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.069410 4940 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c065108a-6b6e-4257-a44e-15182224f721" containerName="sg-core" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.070875 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.074547 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.106551 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.106555 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.157562 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-log-httpd\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.157620 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dp5h\" (UniqueName: \"kubernetes.io/projected/cb1f11d1-eec4-40ad-8249-2800ef977b08-kube-api-access-4dp5h\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.157642 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-run-httpd\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.157676 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-config-data\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.157695 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.157744 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-scripts\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.157765 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.260089 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-log-httpd\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.260148 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dp5h\" (UniqueName: \"kubernetes.io/projected/cb1f11d1-eec4-40ad-8249-2800ef977b08-kube-api-access-4dp5h\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.260173 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-run-httpd\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.260208 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-config-data\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.260228 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.260285 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-scripts\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.260304 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.261602 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-log-httpd\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.262052 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-run-httpd\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.269665 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-scripts\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.270652 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.289458 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.296159 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-config-data\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.322129 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dp5h\" (UniqueName: \"kubernetes.io/projected/cb1f11d1-eec4-40ad-8249-2800ef977b08-kube-api-access-4dp5h\") pod \"ceilometer-0\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.387209 4940 scope.go:117] "RemoveContainer" containerID="3f826e94f4b99a33ae2fbb54cec7a37b25ae6126d27f6602e2d190853c003c6a" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.487277 4940 scope.go:117] "RemoveContainer" containerID="c371286dd54e7a4ea39d1cf40298d2fd48e36ed978e9d3f1d34183d5e0e6d5d0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.505969 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:14:48 crc kubenswrapper[4940]: I1126 07:14:48.855463 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.015344 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"04ef468e-3901-495b-842c-34163260f82d","Type":"ContainerStarted","Data":"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37"} Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.028662 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" event={"ID":"54601667-b13d-49f2-8822-9a8c027da9ab","Type":"ContainerStarted","Data":"6daed158a52521a75b3a9357fe950a97efec30361d87f114dd62e84dbf9046fd"} Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.028828 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.077372 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" podStartSLOduration=3.077356878 podStartE2EDuration="3.077356878s" podCreationTimestamp="2025-11-26 07:14:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:49.07234818 +0000 UTC m=+1190.592489799" watchObservedRunningTime="2025-11-26 07:14:49.077356878 +0000 UTC m=+1190.597498487" Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.077893 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.206790 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="c065108a-6b6e-4257-a44e-15182224f721" path="/var/lib/kubelet/pods/c065108a-6b6e-4257-a44e-15182224f721/volumes" Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.436051 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.717250 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.773590 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6df8bfc666-vzchz"] Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.773879 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6df8bfc666-vzchz" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerName="barbican-api-log" containerID="cri-o://c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450" gracePeriod=30 Nov 26 07:14:49 crc kubenswrapper[4940]: I1126 07:14:49.774023 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6df8bfc666-vzchz" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerName="barbican-api" containerID="cri-o://ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca" gracePeriod=30 Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.073790 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"04ef468e-3901-495b-842c-34163260f82d","Type":"ContainerStarted","Data":"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b"} Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.074357 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.074367 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="04ef468e-3901-495b-842c-34163260f82d" containerName="cinder-api-log" containerID="cri-o://3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37" gracePeriod=30 Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.074457 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="04ef468e-3901-495b-842c-34163260f82d" containerName="cinder-api" containerID="cri-o://cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b" gracePeriod=30 Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.080855 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerStarted","Data":"eafb1e6189953a02a55e573e94f8ea5ae9a8b5ff5c8516a2687e8a27c75a0baf"} Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.080918 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerStarted","Data":"0264dcfcfc9015366960f1683f7b1d499dc6d04584ef405654b3d1018cf27e51"} Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.093451 4940 generic.go:334] "Generic (PLEG): container finished" podID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerID="c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450" exitCode=143 Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.093532 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6df8bfc666-vzchz" 
event={"ID":"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555","Type":"ContainerDied","Data":"c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450"} Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.101052 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2a14eebe-34a0-4cba-ad95-6005882bc37f","Type":"ContainerStarted","Data":"19fc649718babef140afb00fc5ddc19a43267a152193f48ae000eb1b65170cf4"} Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.101116 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2a14eebe-34a0-4cba-ad95-6005882bc37f","Type":"ContainerStarted","Data":"be8542090fbb2bae84de3bd70e352895b4072c8c9e1a01e16fccddc89b174f36"} Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.116726 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.116702231 podStartE2EDuration="4.116702231s" podCreationTimestamp="2025-11-26 07:14:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:50.098895037 +0000 UTC m=+1191.619036656" watchObservedRunningTime="2025-11-26 07:14:50.116702231 +0000 UTC m=+1191.636843850" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.134448 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.27512631 podStartE2EDuration="4.134429133s" podCreationTimestamp="2025-11-26 07:14:46 +0000 UTC" firstStartedPulling="2025-11-26 07:14:47.05727207 +0000 UTC m=+1188.577413689" lastFinishedPulling="2025-11-26 07:14:47.916574893 +0000 UTC m=+1189.436716512" observedRunningTime="2025-11-26 07:14:50.129202208 +0000 UTC m=+1191.649343827" watchObservedRunningTime="2025-11-26 07:14:50.134429133 +0000 UTC m=+1191.654570752" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.694536 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.844140 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/04ef468e-3901-495b-842c-34163260f82d-etc-machine-id\") pod \"04ef468e-3901-495b-842c-34163260f82d\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.844427 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/04ef468e-3901-495b-842c-34163260f82d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "04ef468e-3901-495b-842c-34163260f82d" (UID: "04ef468e-3901-495b-842c-34163260f82d"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.844503 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9xvj\" (UniqueName: \"kubernetes.io/projected/04ef468e-3901-495b-842c-34163260f82d-kube-api-access-z9xvj\") pod \"04ef468e-3901-495b-842c-34163260f82d\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.844538 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data-custom\") pod \"04ef468e-3901-495b-842c-34163260f82d\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.844565 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-scripts\") pod \"04ef468e-3901-495b-842c-34163260f82d\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.844613 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data\") pod \"04ef468e-3901-495b-842c-34163260f82d\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.844630 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-combined-ca-bundle\") pod \"04ef468e-3901-495b-842c-34163260f82d\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.844664 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04ef468e-3901-495b-842c-34163260f82d-logs\") pod \"04ef468e-3901-495b-842c-34163260f82d\" (UID: \"04ef468e-3901-495b-842c-34163260f82d\") " Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.845119 4940 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/04ef468e-3901-495b-842c-34163260f82d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.845382 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04ef468e-3901-495b-842c-34163260f82d-logs" (OuterVolumeSpecName: "logs") pod "04ef468e-3901-495b-842c-34163260f82d" (UID: "04ef468e-3901-495b-842c-34163260f82d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.850988 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "04ef468e-3901-495b-842c-34163260f82d" (UID: "04ef468e-3901-495b-842c-34163260f82d"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.851128 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-scripts" (OuterVolumeSpecName: "scripts") pod "04ef468e-3901-495b-842c-34163260f82d" (UID: "04ef468e-3901-495b-842c-34163260f82d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.851793 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04ef468e-3901-495b-842c-34163260f82d-kube-api-access-z9xvj" (OuterVolumeSpecName: "kube-api-access-z9xvj") pod "04ef468e-3901-495b-842c-34163260f82d" (UID: "04ef468e-3901-495b-842c-34163260f82d"). InnerVolumeSpecName "kube-api-access-z9xvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.882209 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "04ef468e-3901-495b-842c-34163260f82d" (UID: "04ef468e-3901-495b-842c-34163260f82d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.905117 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data" (OuterVolumeSpecName: "config-data") pod "04ef468e-3901-495b-842c-34163260f82d" (UID: "04ef468e-3901-495b-842c-34163260f82d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.946483 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9xvj\" (UniqueName: \"kubernetes.io/projected/04ef468e-3901-495b-842c-34163260f82d-kube-api-access-z9xvj\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.946508 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.946524 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.946533 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.946541 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ef468e-3901-495b-842c-34163260f82d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:50 crc kubenswrapper[4940]: I1126 07:14:50.946549 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04ef468e-3901-495b-842c-34163260f82d-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.107889 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerStarted","Data":"e5c491f5a6227127fb6763832393e74065c6bfda0b03329eb5cc0df21024c4cd"} Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.109901 4940 generic.go:334] "Generic (PLEG): container finished" podID="04ef468e-3901-495b-842c-34163260f82d" containerID="cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b" exitCode=0 Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.109937 4940 generic.go:334] "Generic (PLEG): container finished" podID="04ef468e-3901-495b-842c-34163260f82d" containerID="3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37" exitCode=143 Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.110955 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.111131 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"04ef468e-3901-495b-842c-34163260f82d","Type":"ContainerDied","Data":"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b"} Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.111182 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"04ef468e-3901-495b-842c-34163260f82d","Type":"ContainerDied","Data":"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37"} Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.111211 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"04ef468e-3901-495b-842c-34163260f82d","Type":"ContainerDied","Data":"ee4019f5951f8e97665d9fba8097e9523aede489afeda3d20fcf45c1cc0dbf83"} Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.111237 4940 scope.go:117] "RemoveContainer" containerID="cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.143848 4940 scope.go:117] "RemoveContainer" containerID="3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.150868 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.164590 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.182366 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04ef468e-3901-495b-842c-34163260f82d" path="/var/lib/kubelet/pods/04ef468e-3901-495b-842c-34163260f82d/volumes" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.182504 4940 scope.go:117] "RemoveContainer" containerID="cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b" Nov 26 07:14:51 crc kubenswrapper[4940]: E1126 07:14:51.182920 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b\": container with ID starting with cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b not found: ID does not exist" containerID="cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.182950 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b"} err="failed to get container status 
\"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b\": rpc error: code = NotFound desc = could not find container \"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b\": container with ID starting with cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b not found: ID does not exist" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.182973 4940 scope.go:117] "RemoveContainer" containerID="3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37" Nov 26 07:14:51 crc kubenswrapper[4940]: E1126 07:14:51.183200 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37\": container with ID starting with 3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37 not found: ID does not exist" containerID="3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.183222 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37"} err="failed to get container status \"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37\": rpc error: code = NotFound desc = could not find container \"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37\": container with ID starting with 3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37 not found: ID does not exist" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.183246 4940 scope.go:117] "RemoveContainer" containerID="cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.183383 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.183544 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b"} err="failed to get container status \"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b\": rpc error: code = NotFound desc = could not find container \"cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b\": container with ID starting with cea6d8927a954723209e4d28714a51f0d523301b5c345bb30f55caf59e0f948b not found: ID does not exist" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.183618 4940 scope.go:117] "RemoveContainer" containerID="3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37" Nov 26 07:14:51 crc kubenswrapper[4940]: E1126 07:14:51.183827 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04ef468e-3901-495b-842c-34163260f82d" containerName="cinder-api-log" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.183921 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="04ef468e-3901-495b-842c-34163260f82d" containerName="cinder-api-log" Nov 26 07:14:51 crc kubenswrapper[4940]: E1126 07:14:51.183986 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04ef468e-3901-495b-842c-34163260f82d" containerName="cinder-api" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.184278 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="04ef468e-3901-495b-842c-34163260f82d" containerName="cinder-api" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.184020 4940 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37"} err="failed to get container status \"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37\": rpc error: code = NotFound desc = could not find container \"3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37\": container with ID starting with 3fb5b126d4908c544006d197e8b29b1ebb665f4869584b84acb26dadd808bd37 not found: ID does not exist" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.184566 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="04ef468e-3901-495b-842c-34163260f82d" containerName="cinder-api-log" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.184655 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="04ef468e-3901-495b-842c-34163260f82d" containerName="cinder-api" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.185566 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.187662 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.187884 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.188845 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.195669 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355015 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-scripts\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355118 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krpxk\" (UniqueName: \"kubernetes.io/projected/ff45362c-e19e-470b-9b67-a1c2d6385ba9-kube-api-access-krpxk\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355149 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355172 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355305 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " 
pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355470 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data-custom\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355504 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355682 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff45362c-e19e-470b-9b67-a1c2d6385ba9-logs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.355747 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ff45362c-e19e-470b-9b67-a1c2d6385ba9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.436617 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.457084 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-scripts\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.457141 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krpxk\" (UniqueName: \"kubernetes.io/projected/ff45362c-e19e-470b-9b67-a1c2d6385ba9-kube-api-access-krpxk\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.457168 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.457186 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.457924 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.458150 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data-custom\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.458210 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.458424 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff45362c-e19e-470b-9b67-a1c2d6385ba9-logs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.458700 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ff45362c-e19e-470b-9b67-a1c2d6385ba9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.458998 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ff45362c-e19e-470b-9b67-a1c2d6385ba9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.459121 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff45362c-e19e-470b-9b67-a1c2d6385ba9-logs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.465569 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-scripts\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.466170 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.468160 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.472338 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.478821 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.479854 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data-custom\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.493571 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krpxk\" (UniqueName: \"kubernetes.io/projected/ff45362c-e19e-470b-9b67-a1c2d6385ba9-kube-api-access-krpxk\") pod \"cinder-api-0\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.515784 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.728877 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.729236 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.729285 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.729997 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0393c811a7e2a741f1c1d69f74428b6878add21bd4ed185abcf620da55c2d4a4"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:14:51 crc kubenswrapper[4940]: I1126 07:14:51.730064 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://0393c811a7e2a741f1c1d69f74428b6878add21bd4ed185abcf620da55c2d4a4" gracePeriod=600 Nov 26 07:14:52 crc kubenswrapper[4940]: I1126 07:14:52.014936 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:14:52 crc kubenswrapper[4940]: W1126 07:14:52.016394 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff45362c_e19e_470b_9b67_a1c2d6385ba9.slice/crio-0d358721ff987af6c7566e52a9dd59e0ed7fb881e692d8dcdb35b8b6cf753163 WatchSource:0}: Error finding container 0d358721ff987af6c7566e52a9dd59e0ed7fb881e692d8dcdb35b8b6cf753163: Status 404 returned error can't find the container with id 
0d358721ff987af6c7566e52a9dd59e0ed7fb881e692d8dcdb35b8b6cf753163 Nov 26 07:14:52 crc kubenswrapper[4940]: I1126 07:14:52.121617 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerStarted","Data":"90f372e19617297639de26fe21eb86f93a4ac333d8bd31103b6cd4c11f12608c"} Nov 26 07:14:52 crc kubenswrapper[4940]: I1126 07:14:52.123733 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ff45362c-e19e-470b-9b67-a1c2d6385ba9","Type":"ContainerStarted","Data":"0d358721ff987af6c7566e52a9dd59e0ed7fb881e692d8dcdb35b8b6cf753163"} Nov 26 07:14:52 crc kubenswrapper[4940]: I1126 07:14:52.125812 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="0393c811a7e2a741f1c1d69f74428b6878add21bd4ed185abcf620da55c2d4a4" exitCode=0 Nov 26 07:14:52 crc kubenswrapper[4940]: I1126 07:14:52.125867 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"0393c811a7e2a741f1c1d69f74428b6878add21bd4ed185abcf620da55c2d4a4"} Nov 26 07:14:52 crc kubenswrapper[4940]: I1126 07:14:52.125889 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"8474433f4fb6790caac5b18927cabd8ff680b641694a71892a6b78f7af1c5c17"} Nov 26 07:14:52 crc kubenswrapper[4940]: I1126 07:14:52.125909 4940 scope.go:117] "RemoveContainer" containerID="a590ce9820b409bfd278396384f5037a9a72b69b72e14ce99d91eca689514af5" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.145145 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerStarted","Data":"b1625ffcb166a1e88167a4dbbbf8453100b92e8da85bee70c26bf2f20facfb63"} Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.150855 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ff45362c-e19e-470b-9b67-a1c2d6385ba9","Type":"ContainerStarted","Data":"cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd"} Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.150918 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ff45362c-e19e-470b-9b67-a1c2d6385ba9","Type":"ContainerStarted","Data":"f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594"} Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.151774 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.198115 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.19809972 podStartE2EDuration="2.19809972s" podCreationTimestamp="2025-11-26 07:14:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:14:53.197593833 +0000 UTC m=+1194.717735452" watchObservedRunningTime="2025-11-26 07:14:53.19809972 +0000 UTC m=+1194.718241339" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.199159 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" 
podStartSLOduration=1.397453919 podStartE2EDuration="5.199153202s" podCreationTimestamp="2025-11-26 07:14:48 +0000 UTC" firstStartedPulling="2025-11-26 07:14:49.097209848 +0000 UTC m=+1190.617351467" lastFinishedPulling="2025-11-26 07:14:52.898909131 +0000 UTC m=+1194.419050750" observedRunningTime="2025-11-26 07:14:53.177363261 +0000 UTC m=+1194.697504890" watchObservedRunningTime="2025-11-26 07:14:53.199153202 +0000 UTC m=+1194.719294821" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.690863 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.812018 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data\") pod \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.812456 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rg4m2\" (UniqueName: \"kubernetes.io/projected/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-kube-api-access-rg4m2\") pod \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.813330 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data-custom\") pod \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.813486 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-logs\") pod \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.813636 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-combined-ca-bundle\") pod \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\" (UID: \"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555\") " Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.814513 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-logs" (OuterVolumeSpecName: "logs") pod "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" (UID: "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.822936 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-kube-api-access-rg4m2" (OuterVolumeSpecName: "kube-api-access-rg4m2") pod "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" (UID: "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555"). InnerVolumeSpecName "kube-api-access-rg4m2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.835491 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" (UID: "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.862550 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" (UID: "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.863260 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data" (OuterVolumeSpecName: "config-data") pod "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" (UID: "b1c4a097-5a02-4edd-8d1a-f3fc9fb90555"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.915679 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.915726 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.915738 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rg4m2\" (UniqueName: \"kubernetes.io/projected/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-kube-api-access-rg4m2\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.915749 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:53 crc kubenswrapper[4940]: I1126 07:14:53.915757 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.175519 4940 generic.go:334] "Generic (PLEG): container finished" podID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerID="ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca" exitCode=0 Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.175559 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6df8bfc666-vzchz" event={"ID":"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555","Type":"ContainerDied","Data":"ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca"} Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.176611 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6df8bfc666-vzchz" 
event={"ID":"b1c4a097-5a02-4edd-8d1a-f3fc9fb90555","Type":"ContainerDied","Data":"bee1c9e1343584c7cde70a1d330197db86ef5c46530b2a22957eddd3f1402efb"} Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.175627 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6df8bfc666-vzchz" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.176646 4940 scope.go:117] "RemoveContainer" containerID="ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.178793 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.210854 4940 scope.go:117] "RemoveContainer" containerID="c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.229498 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6df8bfc666-vzchz"] Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.230963 4940 scope.go:117] "RemoveContainer" containerID="ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca" Nov 26 07:14:54 crc kubenswrapper[4940]: E1126 07:14:54.231352 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca\": container with ID starting with ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca not found: ID does not exist" containerID="ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.231444 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca"} err="failed to get container status \"ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca\": rpc error: code = NotFound desc = could not find container \"ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca\": container with ID starting with ac57ef413c3abdf1956493a02eba067b225a615fc5f51cf9f6e43f3a75aad1ca not found: ID does not exist" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.231580 4940 scope.go:117] "RemoveContainer" containerID="c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450" Nov 26 07:14:54 crc kubenswrapper[4940]: E1126 07:14:54.232063 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450\": container with ID starting with c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450 not found: ID does not exist" containerID="c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 07:14:54.232135 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450"} err="failed to get container status \"c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450\": rpc error: code = NotFound desc = could not find container \"c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450\": container with ID starting with c7e931490721366884a13d372ec971efb813cd8edc86c1bfd6120456b1daa450 not found: ID does not exist" Nov 26 07:14:54 crc kubenswrapper[4940]: I1126 
07:14:54.238529 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6df8bfc666-vzchz"] Nov 26 07:14:55 crc kubenswrapper[4940]: I1126 07:14:55.187597 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" path="/var/lib/kubelet/pods/b1c4a097-5a02-4edd-8d1a-f3fc9fb90555/volumes" Nov 26 07:14:56 crc kubenswrapper[4940]: I1126 07:14:56.498207 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:14:56 crc kubenswrapper[4940]: I1126 07:14:56.575609 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc67f459c-h94cl"] Nov 26 07:14:56 crc kubenswrapper[4940]: I1126 07:14:56.575911 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" podUID="63f138a5-b797-4d75-b040-37af152cf338" containerName="dnsmasq-dns" containerID="cri-o://db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4" gracePeriod=10 Nov 26 07:14:56 crc kubenswrapper[4940]: I1126 07:14:56.714182 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 07:14:56 crc kubenswrapper[4940]: I1126 07:14:56.777326 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.121266 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.213169 4940 generic.go:334] "Generic (PLEG): container finished" podID="63f138a5-b797-4d75-b040-37af152cf338" containerID="db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4" exitCode=0 Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.213267 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.213275 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" event={"ID":"63f138a5-b797-4d75-b040-37af152cf338","Type":"ContainerDied","Data":"db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4"} Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.213429 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc67f459c-h94cl" event={"ID":"63f138a5-b797-4d75-b040-37af152cf338","Type":"ContainerDied","Data":"e2a77f8cdd6ab851c3c41d19cfe12679f8e99dd3a98d7ca83cfd72e507329fcb"} Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.213446 4940 scope.go:117] "RemoveContainer" containerID="db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.213883 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerName="cinder-scheduler" containerID="cri-o://be8542090fbb2bae84de3bd70e352895b4072c8c9e1a01e16fccddc89b174f36" gracePeriod=30 Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.214033 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerName="probe" containerID="cri-o://19fc649718babef140afb00fc5ddc19a43267a152193f48ae000eb1b65170cf4" gracePeriod=30 Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.239448 4940 scope.go:117] "RemoveContainer" containerID="a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.266550 4940 scope.go:117] "RemoveContainer" containerID="db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4" Nov 26 07:14:57 crc kubenswrapper[4940]: E1126 07:14:57.268073 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4\": container with ID starting with db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4 not found: ID does not exist" containerID="db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.268106 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4"} err="failed to get container status \"db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4\": rpc error: code = NotFound desc = could not find container \"db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4\": container with ID starting with db9cfc59a61bbd0fbf2c1610ddb1dee23a5d7d300edd43282ed6b06f51c48fa4 not found: ID does not exist" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.268128 4940 scope.go:117] "RemoveContainer" containerID="a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc" Nov 26 07:14:57 crc kubenswrapper[4940]: E1126 07:14:57.268365 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc\": container with ID starting with a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc not found: ID does not exist" 
containerID="a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.268386 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc"} err="failed to get container status \"a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc\": rpc error: code = NotFound desc = could not find container \"a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc\": container with ID starting with a29a2fc93794c70c5fa46b72030f73f5f5315bde43c71c48ae4021ac48b3b8bc not found: ID does not exist" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.278382 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-sb\") pod \"63f138a5-b797-4d75-b040-37af152cf338\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.278418 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-svc\") pod \"63f138a5-b797-4d75-b040-37af152cf338\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.278515 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-swift-storage-0\") pod \"63f138a5-b797-4d75-b040-37af152cf338\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.278546 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p4j7\" (UniqueName: \"kubernetes.io/projected/63f138a5-b797-4d75-b040-37af152cf338-kube-api-access-9p4j7\") pod \"63f138a5-b797-4d75-b040-37af152cf338\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.278586 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-config\") pod \"63f138a5-b797-4d75-b040-37af152cf338\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.278652 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-nb\") pod \"63f138a5-b797-4d75-b040-37af152cf338\" (UID: \"63f138a5-b797-4d75-b040-37af152cf338\") " Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.285960 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63f138a5-b797-4d75-b040-37af152cf338-kube-api-access-9p4j7" (OuterVolumeSpecName: "kube-api-access-9p4j7") pod "63f138a5-b797-4d75-b040-37af152cf338" (UID: "63f138a5-b797-4d75-b040-37af152cf338"). InnerVolumeSpecName "kube-api-access-9p4j7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.327542 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "63f138a5-b797-4d75-b040-37af152cf338" (UID: "63f138a5-b797-4d75-b040-37af152cf338"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.329500 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-config" (OuterVolumeSpecName: "config") pod "63f138a5-b797-4d75-b040-37af152cf338" (UID: "63f138a5-b797-4d75-b040-37af152cf338"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.331669 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "63f138a5-b797-4d75-b040-37af152cf338" (UID: "63f138a5-b797-4d75-b040-37af152cf338"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.332701 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "63f138a5-b797-4d75-b040-37af152cf338" (UID: "63f138a5-b797-4d75-b040-37af152cf338"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.340752 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "63f138a5-b797-4d75-b040-37af152cf338" (UID: "63f138a5-b797-4d75-b040-37af152cf338"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.380357 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.380468 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9p4j7\" (UniqueName: \"kubernetes.io/projected/63f138a5-b797-4d75-b040-37af152cf338-kube-api-access-9p4j7\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.380481 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.380489 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.380499 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.380506 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63f138a5-b797-4d75-b040-37af152cf338-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.543853 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc67f459c-h94cl"] Nov 26 07:14:57 crc kubenswrapper[4940]: I1126 07:14:57.550208 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cc67f459c-h94cl"] Nov 26 07:14:58 crc kubenswrapper[4940]: I1126 07:14:58.235962 4940 generic.go:334] "Generic (PLEG): container finished" podID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerID="19fc649718babef140afb00fc5ddc19a43267a152193f48ae000eb1b65170cf4" exitCode=0 Nov 26 07:14:58 crc kubenswrapper[4940]: I1126 07:14:58.236247 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2a14eebe-34a0-4cba-ad95-6005882bc37f","Type":"ContainerDied","Data":"19fc649718babef140afb00fc5ddc19a43267a152193f48ae000eb1b65170cf4"} Nov 26 07:14:58 crc kubenswrapper[4940]: I1126 07:14:58.924122 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:14:59 crc kubenswrapper[4940]: I1126 07:14:59.177759 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63f138a5-b797-4d75-b040-37af152cf338" path="/var/lib/kubelet/pods/63f138a5-b797-4d75-b040-37af152cf338/volumes" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.135462 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99"] Nov 26 07:15:00 crc kubenswrapper[4940]: E1126 07:15:00.136126 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerName="barbican-api" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.136141 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerName="barbican-api" 
Nov 26 07:15:00 crc kubenswrapper[4940]: E1126 07:15:00.136179 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63f138a5-b797-4d75-b040-37af152cf338" containerName="dnsmasq-dns" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.136191 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="63f138a5-b797-4d75-b040-37af152cf338" containerName="dnsmasq-dns" Nov 26 07:15:00 crc kubenswrapper[4940]: E1126 07:15:00.136211 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerName="barbican-api-log" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.136218 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerName="barbican-api-log" Nov 26 07:15:00 crc kubenswrapper[4940]: E1126 07:15:00.136234 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63f138a5-b797-4d75-b040-37af152cf338" containerName="init" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.136240 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="63f138a5-b797-4d75-b040-37af152cf338" containerName="init" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.136436 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerName="barbican-api-log" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.136453 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="63f138a5-b797-4d75-b040-37af152cf338" containerName="dnsmasq-dns" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.136498 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1c4a097-5a02-4edd-8d1a-f3fc9fb90555" containerName="barbican-api" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.137124 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.141626 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.142020 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.157403 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99"] Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.233352 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38381651-2c1c-4c63-8a3c-122d6cd39737-config-volume\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.233477 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnfgr\" (UniqueName: \"kubernetes.io/projected/38381651-2c1c-4c63-8a3c-122d6cd39737-kube-api-access-tnfgr\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.233505 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38381651-2c1c-4c63-8a3c-122d6cd39737-secret-volume\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.258760 4940 generic.go:334] "Generic (PLEG): container finished" podID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerID="be8542090fbb2bae84de3bd70e352895b4072c8c9e1a01e16fccddc89b174f36" exitCode=0 Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.258802 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2a14eebe-34a0-4cba-ad95-6005882bc37f","Type":"ContainerDied","Data":"be8542090fbb2bae84de3bd70e352895b4072c8c9e1a01e16fccddc89b174f36"} Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.335621 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnfgr\" (UniqueName: \"kubernetes.io/projected/38381651-2c1c-4c63-8a3c-122d6cd39737-kube-api-access-tnfgr\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.335676 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38381651-2c1c-4c63-8a3c-122d6cd39737-secret-volume\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.335799 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38381651-2c1c-4c63-8a3c-122d6cd39737-config-volume\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.337586 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38381651-2c1c-4c63-8a3c-122d6cd39737-config-volume\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.354947 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38381651-2c1c-4c63-8a3c-122d6cd39737-secret-volume\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.359239 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnfgr\" (UniqueName: \"kubernetes.io/projected/38381651-2c1c-4c63-8a3c-122d6cd39737-kube-api-access-tnfgr\") pod \"collect-profiles-29402355-qvm99\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.429458 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.459802 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.540786 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2a14eebe-34a0-4cba-ad95-6005882bc37f-etc-machine-id\") pod \"2a14eebe-34a0-4cba-ad95-6005882bc37f\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.540865 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data\") pod \"2a14eebe-34a0-4cba-ad95-6005882bc37f\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.540967 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb8lw\" (UniqueName: \"kubernetes.io/projected/2a14eebe-34a0-4cba-ad95-6005882bc37f-kube-api-access-rb8lw\") pod \"2a14eebe-34a0-4cba-ad95-6005882bc37f\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.541097 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data-custom\") pod \"2a14eebe-34a0-4cba-ad95-6005882bc37f\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.541205 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-combined-ca-bundle\") pod \"2a14eebe-34a0-4cba-ad95-6005882bc37f\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.541253 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-scripts\") pod \"2a14eebe-34a0-4cba-ad95-6005882bc37f\" (UID: \"2a14eebe-34a0-4cba-ad95-6005882bc37f\") " Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.541940 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a14eebe-34a0-4cba-ad95-6005882bc37f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2a14eebe-34a0-4cba-ad95-6005882bc37f" (UID: "2a14eebe-34a0-4cba-ad95-6005882bc37f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.549105 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2a14eebe-34a0-4cba-ad95-6005882bc37f" (UID: "2a14eebe-34a0-4cba-ad95-6005882bc37f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.549156 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-scripts" (OuterVolumeSpecName: "scripts") pod "2a14eebe-34a0-4cba-ad95-6005882bc37f" (UID: "2a14eebe-34a0-4cba-ad95-6005882bc37f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.549170 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a14eebe-34a0-4cba-ad95-6005882bc37f-kube-api-access-rb8lw" (OuterVolumeSpecName: "kube-api-access-rb8lw") pod "2a14eebe-34a0-4cba-ad95-6005882bc37f" (UID: "2a14eebe-34a0-4cba-ad95-6005882bc37f"). InnerVolumeSpecName "kube-api-access-rb8lw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.607465 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a14eebe-34a0-4cba-ad95-6005882bc37f" (UID: "2a14eebe-34a0-4cba-ad95-6005882bc37f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.644289 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb8lw\" (UniqueName: \"kubernetes.io/projected/2a14eebe-34a0-4cba-ad95-6005882bc37f-kube-api-access-rb8lw\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.644678 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.644692 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.644704 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.644717 4940 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2a14eebe-34a0-4cba-ad95-6005882bc37f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.661354 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data" (OuterVolumeSpecName: "config-data") pod "2a14eebe-34a0-4cba-ad95-6005882bc37f" (UID: "2a14eebe-34a0-4cba-ad95-6005882bc37f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.746620 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a14eebe-34a0-4cba-ad95-6005882bc37f-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:00 crc kubenswrapper[4940]: I1126 07:15:00.997690 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99"] Nov 26 07:15:01 crc kubenswrapper[4940]: W1126 07:15:01.006497 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38381651_2c1c_4c63_8a3c_122d6cd39737.slice/crio-4e3d48c080913acee565f6273e2ae4f1b6e134bc7d6ae8319d43978a4a7a4dc4 WatchSource:0}: Error finding container 4e3d48c080913acee565f6273e2ae4f1b6e134bc7d6ae8319d43978a4a7a4dc4: Status 404 returned error can't find the container with id 4e3d48c080913acee565f6273e2ae4f1b6e134bc7d6ae8319d43978a4a7a4dc4 Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.270267 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2a14eebe-34a0-4cba-ad95-6005882bc37f","Type":"ContainerDied","Data":"750248bab9ed1f90fbaf684a0ed64f6db848e1f81befc84b7243b6d6ca08b4dc"} Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.270585 4940 scope.go:117] "RemoveContainer" containerID="19fc649718babef140afb00fc5ddc19a43267a152193f48ae000eb1b65170cf4" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.270456 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.272491 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" event={"ID":"38381651-2c1c-4c63-8a3c-122d6cd39737","Type":"ContainerStarted","Data":"4e3d48c080913acee565f6273e2ae4f1b6e134bc7d6ae8319d43978a4a7a4dc4"} Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.298601 4940 scope.go:117] "RemoveContainer" containerID="be8542090fbb2bae84de3bd70e352895b4072c8c9e1a01e16fccddc89b174f36" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.319112 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.341890 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.355463 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:15:01 crc kubenswrapper[4940]: E1126 07:15:01.355836 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerName="cinder-scheduler" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.355852 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerName="cinder-scheduler" Nov 26 07:15:01 crc kubenswrapper[4940]: E1126 07:15:01.355889 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerName="probe" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.355896 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerName="probe" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.356068 4940 
memory_manager.go:354] "RemoveStaleState removing state" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerName="probe" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.356090 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" containerName="cinder-scheduler" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.356959 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.359309 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.392548 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.440593 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.470718 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6kg2\" (UniqueName: \"kubernetes.io/projected/71365c4a-71fa-4c50-9c71-b87510dcf548-kube-api-access-j6kg2\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.470853 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71365c4a-71fa-4c50-9c71-b87510dcf548-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.470896 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.470925 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.470948 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-scripts\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.471014 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.535241 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-cc77dcc7b-4zbzz"] Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 
07:15:01.537537 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-cc77dcc7b-4zbzz" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerName="neutron-httpd" containerID="cri-o://0a70d4bdb0115f7a18ed21eeda87b03d3f5e7e0d25135b6b262a00eda3b5349f" gracePeriod=30 Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.536977 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-cc77dcc7b-4zbzz" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerName="neutron-api" containerID="cri-o://1742331f9d41135d334bd894ddb48235c0ceec5012de2e3b6099441b5178a47c" gracePeriod=30 Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.575738 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71365c4a-71fa-4c50-9c71-b87510dcf548-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.575810 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.575838 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.575856 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-scripts\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.575930 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.575951 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6kg2\" (UniqueName: \"kubernetes.io/projected/71365c4a-71fa-4c50-9c71-b87510dcf548-kube-api-access-j6kg2\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.579517 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71365c4a-71fa-4c50-9c71-b87510dcf548-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.583259 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-scripts\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " 
pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.590139 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.595226 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.600666 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.610563 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6kg2\" (UniqueName: \"kubernetes.io/projected/71365c4a-71fa-4c50-9c71-b87510dcf548-kube-api-access-j6kg2\") pod \"cinder-scheduler-0\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") " pod="openstack/cinder-scheduler-0" Nov 26 07:15:01 crc kubenswrapper[4940]: I1126 07:15:01.678274 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:15:02 crc kubenswrapper[4940]: I1126 07:15:02.162853 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:15:02 crc kubenswrapper[4940]: I1126 07:15:02.303215 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"71365c4a-71fa-4c50-9c71-b87510dcf548","Type":"ContainerStarted","Data":"35a80012c45a955a1127f9daf12c303909b9aafa34d91931ff69d8959b162c09"} Nov 26 07:15:02 crc kubenswrapper[4940]: I1126 07:15:02.312084 4940 generic.go:334] "Generic (PLEG): container finished" podID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerID="0a70d4bdb0115f7a18ed21eeda87b03d3f5e7e0d25135b6b262a00eda3b5349f" exitCode=0 Nov 26 07:15:02 crc kubenswrapper[4940]: I1126 07:15:02.312130 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cc77dcc7b-4zbzz" event={"ID":"4f5c5403-4bbf-4a62-8e53-e0c774252e72","Type":"ContainerDied","Data":"0a70d4bdb0115f7a18ed21eeda87b03d3f5e7e0d25135b6b262a00eda3b5349f"} Nov 26 07:15:02 crc kubenswrapper[4940]: I1126 07:15:02.318512 4940 generic.go:334] "Generic (PLEG): container finished" podID="38381651-2c1c-4c63-8a3c-122d6cd39737" containerID="49eeee8f895af4609ba9d4c461f6a1836b57ee1532776d3a678ee604d4f405d9" exitCode=0 Nov 26 07:15:02 crc kubenswrapper[4940]: I1126 07:15:02.318556 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" event={"ID":"38381651-2c1c-4c63-8a3c-122d6cd39737","Type":"ContainerDied","Data":"49eeee8f895af4609ba9d4c461f6a1836b57ee1532776d3a678ee604d4f405d9"} Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.192465 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a14eebe-34a0-4cba-ad95-6005882bc37f" path="/var/lib/kubelet/pods/2a14eebe-34a0-4cba-ad95-6005882bc37f/volumes" Nov 26 07:15:03 
crc kubenswrapper[4940]: I1126 07:15:03.336834 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"71365c4a-71fa-4c50-9c71-b87510dcf548","Type":"ContainerStarted","Data":"240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3"} Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.492795 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.501093 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.746873 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.932279 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnfgr\" (UniqueName: \"kubernetes.io/projected/38381651-2c1c-4c63-8a3c-122d6cd39737-kube-api-access-tnfgr\") pod \"38381651-2c1c-4c63-8a3c-122d6cd39737\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.932370 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38381651-2c1c-4c63-8a3c-122d6cd39737-config-volume\") pod \"38381651-2c1c-4c63-8a3c-122d6cd39737\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.932411 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38381651-2c1c-4c63-8a3c-122d6cd39737-secret-volume\") pod \"38381651-2c1c-4c63-8a3c-122d6cd39737\" (UID: \"38381651-2c1c-4c63-8a3c-122d6cd39737\") " Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.933151 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38381651-2c1c-4c63-8a3c-122d6cd39737-config-volume" (OuterVolumeSpecName: "config-volume") pod "38381651-2c1c-4c63-8a3c-122d6cd39737" (UID: "38381651-2c1c-4c63-8a3c-122d6cd39737"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.933680 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38381651-2c1c-4c63-8a3c-122d6cd39737-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.938240 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38381651-2c1c-4c63-8a3c-122d6cd39737-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "38381651-2c1c-4c63-8a3c-122d6cd39737" (UID: "38381651-2c1c-4c63-8a3c-122d6cd39737"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.939468 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38381651-2c1c-4c63-8a3c-122d6cd39737-kube-api-access-tnfgr" (OuterVolumeSpecName: "kube-api-access-tnfgr") pod "38381651-2c1c-4c63-8a3c-122d6cd39737" (UID: "38381651-2c1c-4c63-8a3c-122d6cd39737"). InnerVolumeSpecName "kube-api-access-tnfgr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:03 crc kubenswrapper[4940]: I1126 07:15:03.956838 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 07:15:04 crc kubenswrapper[4940]: I1126 07:15:04.036289 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnfgr\" (UniqueName: \"kubernetes.io/projected/38381651-2c1c-4c63-8a3c-122d6cd39737-kube-api-access-tnfgr\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:04 crc kubenswrapper[4940]: I1126 07:15:04.036319 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38381651-2c1c-4c63-8a3c-122d6cd39737-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:04 crc kubenswrapper[4940]: I1126 07:15:04.347010 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" event={"ID":"38381651-2c1c-4c63-8a3c-122d6cd39737","Type":"ContainerDied","Data":"4e3d48c080913acee565f6273e2ae4f1b6e134bc7d6ae8319d43978a4a7a4dc4"} Nov 26 07:15:04 crc kubenswrapper[4940]: I1126 07:15:04.347058 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e3d48c080913acee565f6273e2ae4f1b6e134bc7d6ae8319d43978a4a7a4dc4" Nov 26 07:15:04 crc kubenswrapper[4940]: I1126 07:15:04.347110 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99" Nov 26 07:15:04 crc kubenswrapper[4940]: I1126 07:15:04.357980 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"71365c4a-71fa-4c50-9c71-b87510dcf548","Type":"ContainerStarted","Data":"f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca"} Nov 26 07:15:04 crc kubenswrapper[4940]: I1126 07:15:04.381733 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.381711153 podStartE2EDuration="3.381711153s" podCreationTimestamp="2025-11-26 07:15:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:15:04.381665761 +0000 UTC m=+1205.901807380" watchObservedRunningTime="2025-11-26 07:15:04.381711153 +0000 UTC m=+1205.901852772" Nov 26 07:15:05 crc kubenswrapper[4940]: I1126 07:15:05.908498 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:15:06 crc kubenswrapper[4940]: I1126 07:15:06.678520 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.533458 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 07:15:10 crc kubenswrapper[4940]: E1126 07:15:10.534575 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38381651-2c1c-4c63-8a3c-122d6cd39737" containerName="collect-profiles" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.534596 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="38381651-2c1c-4c63-8a3c-122d6cd39737" containerName="collect-profiles" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.534922 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="38381651-2c1c-4c63-8a3c-122d6cd39737" containerName="collect-profiles" Nov 26 07:15:10 crc 
kubenswrapper[4940]: I1126 07:15:10.535928 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.537833 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-dd6k5" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.538143 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.538282 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.542769 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.559140 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.559205 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkcmv\" (UniqueName: \"kubernetes.io/projected/6f4d1c84-5193-402a-9264-cd2bb000633c-kube-api-access-mkcmv\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.559299 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.559324 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config-secret\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.660586 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.660628 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config-secret\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.660654 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 
07:15:10.660692 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkcmv\" (UniqueName: \"kubernetes.io/projected/6f4d1c84-5193-402a-9264-cd2bb000633c-kube-api-access-mkcmv\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.661553 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.667441 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config-secret\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.667599 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.676769 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkcmv\" (UniqueName: \"kubernetes.io/projected/6f4d1c84-5193-402a-9264-cd2bb000633c-kube-api-access-mkcmv\") pod \"openstackclient\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " pod="openstack/openstackclient" Nov 26 07:15:10 crc kubenswrapper[4940]: I1126 07:15:10.856181 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.315007 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 07:15:11 crc kubenswrapper[4940]: W1126 07:15:11.336997 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f4d1c84_5193_402a_9264_cd2bb000633c.slice/crio-687519751febc4defeb49c7a9a99f702be847f5eb60267c72ed7b527bcbf0ea0 WatchSource:0}: Error finding container 687519751febc4defeb49c7a9a99f702be847f5eb60267c72ed7b527bcbf0ea0: Status 404 returned error can't find the container with id 687519751febc4defeb49c7a9a99f702be847f5eb60267c72ed7b527bcbf0ea0 Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.416359 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6f4d1c84-5193-402a-9264-cd2bb000633c","Type":"ContainerStarted","Data":"687519751febc4defeb49c7a9a99f702be847f5eb60267c72ed7b527bcbf0ea0"} Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.419514 4940 generic.go:334] "Generic (PLEG): container finished" podID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerID="1742331f9d41135d334bd894ddb48235c0ceec5012de2e3b6099441b5178a47c" exitCode=0 Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.419560 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cc77dcc7b-4zbzz" event={"ID":"4f5c5403-4bbf-4a62-8e53-e0c774252e72","Type":"ContainerDied","Data":"1742331f9d41135d334bd894ddb48235c0ceec5012de2e3b6099441b5178a47c"} Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.823948 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.978213 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.988968 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-config\") pod \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.989059 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-httpd-config\") pod \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.989143 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-ovndb-tls-certs\") pod \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.989310 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-combined-ca-bundle\") pod \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.989567 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4xj8\" (UniqueName: 
\"kubernetes.io/projected/4f5c5403-4bbf-4a62-8e53-e0c774252e72-kube-api-access-p4xj8\") pod \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\" (UID: \"4f5c5403-4bbf-4a62-8e53-e0c774252e72\") " Nov 26 07:15:11 crc kubenswrapper[4940]: I1126 07:15:11.996127 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "4f5c5403-4bbf-4a62-8e53-e0c774252e72" (UID: "4f5c5403-4bbf-4a62-8e53-e0c774252e72"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.006684 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f5c5403-4bbf-4a62-8e53-e0c774252e72-kube-api-access-p4xj8" (OuterVolumeSpecName: "kube-api-access-p4xj8") pod "4f5c5403-4bbf-4a62-8e53-e0c774252e72" (UID: "4f5c5403-4bbf-4a62-8e53-e0c774252e72"). InnerVolumeSpecName "kube-api-access-p4xj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.061540 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f5c5403-4bbf-4a62-8e53-e0c774252e72" (UID: "4f5c5403-4bbf-4a62-8e53-e0c774252e72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.085437 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-config" (OuterVolumeSpecName: "config") pod "4f5c5403-4bbf-4a62-8e53-e0c774252e72" (UID: "4f5c5403-4bbf-4a62-8e53-e0c774252e72"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.091937 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.091978 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.091990 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.092005 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4xj8\" (UniqueName: \"kubernetes.io/projected/4f5c5403-4bbf-4a62-8e53-e0c774252e72-kube-api-access-p4xj8\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.099200 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "4f5c5403-4bbf-4a62-8e53-e0c774252e72" (UID: "4f5c5403-4bbf-4a62-8e53-e0c774252e72"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.194254 4940 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f5c5403-4bbf-4a62-8e53-e0c774252e72-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.211724 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-bf77c95b9-864d6"] Nov 26 07:15:12 crc kubenswrapper[4940]: E1126 07:15:12.212286 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerName="neutron-httpd" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.212309 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerName="neutron-httpd" Nov 26 07:15:12 crc kubenswrapper[4940]: E1126 07:15:12.212318 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerName="neutron-api" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.212325 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerName="neutron-api" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.212584 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerName="neutron-httpd" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.212608 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" containerName="neutron-api" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.214048 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.218711 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.219357 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.219414 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.227066 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-bf77c95b9-864d6"] Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.295526 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-etc-swift\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.295565 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-config-data\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.295586 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-internal-tls-certs\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.295616 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-public-tls-certs\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.295657 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-log-httpd\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.295690 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-run-httpd\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.295712 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpbs2\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-kube-api-access-vpbs2\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.295948 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-combined-ca-bundle\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.397277 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-run-httpd\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.397325 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpbs2\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-kube-api-access-vpbs2\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.397404 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-combined-ca-bundle\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.397766 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-run-httpd\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.398798 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-etc-swift\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.398823 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-config-data\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.398846 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-internal-tls-certs\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.398894 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-public-tls-certs\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.398950 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-log-httpd\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.399378 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-log-httpd\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.401779 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-combined-ca-bundle\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.402600 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-public-tls-certs\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.403659 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-etc-swift\") pod \"swift-proxy-bf77c95b9-864d6\" 
(UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.403676 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-config-data\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.403840 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-internal-tls-certs\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.412901 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpbs2\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-kube-api-access-vpbs2\") pod \"swift-proxy-bf77c95b9-864d6\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") " pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.438955 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-cc77dcc7b-4zbzz" event={"ID":"4f5c5403-4bbf-4a62-8e53-e0c774252e72","Type":"ContainerDied","Data":"e5fb7963395fc3ff1d08da86dda88b7545f6ffc224be80efc471f5921347d059"} Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.439006 4940 scope.go:117] "RemoveContainer" containerID="0a70d4bdb0115f7a18ed21eeda87b03d3f5e7e0d25135b6b262a00eda3b5349f" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.439134 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-cc77dcc7b-4zbzz" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.461197 4940 scope.go:117] "RemoveContainer" containerID="1742331f9d41135d334bd894ddb48235c0ceec5012de2e3b6099441b5178a47c" Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.473091 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-cc77dcc7b-4zbzz"] Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.483686 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-cc77dcc7b-4zbzz"] Nov 26 07:15:12 crc kubenswrapper[4940]: I1126 07:15:12.582970 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.180706 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f5c5403-4bbf-4a62-8e53-e0c774252e72" path="/var/lib/kubelet/pods/4f5c5403-4bbf-4a62-8e53-e0c774252e72/volumes" Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.204699 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-bf77c95b9-864d6"] Nov 26 07:15:13 crc kubenswrapper[4940]: W1126 07:15:13.226593 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01a8836d_ba47_44ef_995e_f5bf2227dcd4.slice/crio-94738a326a2732009ec6dee1d52ac3fd748e031b48f1b630d1b45377b3aabae2 WatchSource:0}: Error finding container 94738a326a2732009ec6dee1d52ac3fd748e031b48f1b630d1b45377b3aabae2: Status 404 returned error can't find the container with id 94738a326a2732009ec6dee1d52ac3fd748e031b48f1b630d1b45377b3aabae2 Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.321259 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.321859 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="ceilometer-central-agent" containerID="cri-o://eafb1e6189953a02a55e573e94f8ea5ae9a8b5ff5c8516a2687e8a27c75a0baf" gracePeriod=30 Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.322304 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="sg-core" containerID="cri-o://90f372e19617297639de26fe21eb86f93a4ac333d8bd31103b6cd4c11f12608c" gracePeriod=30 Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.322398 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="proxy-httpd" containerID="cri-o://b1625ffcb166a1e88167a4dbbbf8453100b92e8da85bee70c26bf2f20facfb63" gracePeriod=30 Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.322316 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="ceilometer-notification-agent" containerID="cri-o://e5c491f5a6227127fb6763832393e74065c6bfda0b03329eb5cc0df21024c4cd" gracePeriod=30 Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.353446 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 07:15:13 crc kubenswrapper[4940]: I1126 07:15:13.500255 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-bf77c95b9-864d6" event={"ID":"01a8836d-ba47-44ef-995e-f5bf2227dcd4","Type":"ContainerStarted","Data":"94738a326a2732009ec6dee1d52ac3fd748e031b48f1b630d1b45377b3aabae2"} Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.516511 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-bf77c95b9-864d6" event={"ID":"01a8836d-ba47-44ef-995e-f5bf2227dcd4","Type":"ContainerStarted","Data":"8c41b0e8cdcb6ee2abbf477b7026854cbe03716deb4b56286f7d6da58714b16d"} Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.516892 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 
07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.516908 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-bf77c95b9-864d6" event={"ID":"01a8836d-ba47-44ef-995e-f5bf2227dcd4","Type":"ContainerStarted","Data":"9c7f2f809a503ac5e53851c2de3c5ae678c6834ac46092d7cc5557057bdccf99"} Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.520151 4940 generic.go:334] "Generic (PLEG): container finished" podID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerID="b1625ffcb166a1e88167a4dbbbf8453100b92e8da85bee70c26bf2f20facfb63" exitCode=0 Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.520187 4940 generic.go:334] "Generic (PLEG): container finished" podID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerID="90f372e19617297639de26fe21eb86f93a4ac333d8bd31103b6cd4c11f12608c" exitCode=2 Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.520197 4940 generic.go:334] "Generic (PLEG): container finished" podID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerID="eafb1e6189953a02a55e573e94f8ea5ae9a8b5ff5c8516a2687e8a27c75a0baf" exitCode=0 Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.520219 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerDied","Data":"b1625ffcb166a1e88167a4dbbbf8453100b92e8da85bee70c26bf2f20facfb63"} Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.520241 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerDied","Data":"90f372e19617297639de26fe21eb86f93a4ac333d8bd31103b6cd4c11f12608c"} Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.520251 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerDied","Data":"eafb1e6189953a02a55e573e94f8ea5ae9a8b5ff5c8516a2687e8a27c75a0baf"} Nov 26 07:15:14 crc kubenswrapper[4940]: I1126 07:15:14.540141 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-bf77c95b9-864d6" podStartSLOduration=2.540121622 podStartE2EDuration="2.540121622s" podCreationTimestamp="2025-11-26 07:15:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:15:14.536505517 +0000 UTC m=+1216.056647136" watchObservedRunningTime="2025-11-26 07:15:14.540121622 +0000 UTC m=+1216.060263241" Nov 26 07:15:15 crc kubenswrapper[4940]: I1126 07:15:15.528883 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:16 crc kubenswrapper[4940]: E1126 07:15:16.945991 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb1f11d1_eec4_40ad_8249_2800ef977b08.slice/crio-conmon-e5c491f5a6227127fb6763832393e74065c6bfda0b03329eb5cc0df21024c4cd.scope\": RecentStats: unable to find data in memory cache]" Nov 26 07:15:17 crc kubenswrapper[4940]: I1126 07:15:17.552512 4940 generic.go:334] "Generic (PLEG): container finished" podID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerID="e5c491f5a6227127fb6763832393e74065c6bfda0b03329eb5cc0df21024c4cd" exitCode=0 Nov 26 07:15:17 crc kubenswrapper[4940]: I1126 07:15:17.552579 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerDied","Data":"e5c491f5a6227127fb6763832393e74065c6bfda0b03329eb5cc0df21024c4cd"} Nov 26 07:15:18 crc kubenswrapper[4940]: I1126 07:15:18.507296 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.162:3000/\": dial tcp 10.217.0.162:3000: connect: connection refused" Nov 26 07:15:19 crc kubenswrapper[4940]: I1126 07:15:19.551196 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:15:19 crc kubenswrapper[4940]: I1126 07:15:19.551809 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="7204b63f-239d-4f0e-96ce-1cf0ad85382a" containerName="kube-state-metrics" containerID="cri-o://87ece76ba7dc37fedad0f4dfdecb3831b0379e05c8f03d8362432bd1dd10fe9b" gracePeriod=30 Nov 26 07:15:20 crc kubenswrapper[4940]: I1126 07:15:20.578209 4940 generic.go:334] "Generic (PLEG): container finished" podID="7204b63f-239d-4f0e-96ce-1cf0ad85382a" containerID="87ece76ba7dc37fedad0f4dfdecb3831b0379e05c8f03d8362432bd1dd10fe9b" exitCode=2 Nov 26 07:15:20 crc kubenswrapper[4940]: I1126 07:15:20.578247 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7204b63f-239d-4f0e-96ce-1cf0ad85382a","Type":"ContainerDied","Data":"87ece76ba7dc37fedad0f4dfdecb3831b0379e05c8f03d8362432bd1dd10fe9b"} Nov 26 07:15:21 crc kubenswrapper[4940]: I1126 07:15:21.270614 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="7204b63f-239d-4f0e-96ce-1cf0ad85382a" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": dial tcp 10.217.0.106:8081: connect: connection refused" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.510785 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.589979 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.591952 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-bf77c95b9-864d6" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.598887 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7204b63f-239d-4f0e-96ce-1cf0ad85382a","Type":"ContainerDied","Data":"a1fa4cbc35e749451233b43051cf39d7fc9bd25da9d92292216f41723851d1a0"} Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.598899 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.599255 4940 scope.go:117] "RemoveContainer" containerID="87ece76ba7dc37fedad0f4dfdecb3831b0379e05c8f03d8362432bd1dd10fe9b" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.602725 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6f4d1c84-5193-402a-9264-cd2bb000633c","Type":"ContainerStarted","Data":"02981c04b83b2ef74e7cdd7032106bee3b1fd93d22f221e3394e2f19eaf07d07"} Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.613056 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bv7p\" (UniqueName: \"kubernetes.io/projected/7204b63f-239d-4f0e-96ce-1cf0ad85382a-kube-api-access-6bv7p\") pod \"7204b63f-239d-4f0e-96ce-1cf0ad85382a\" (UID: \"7204b63f-239d-4f0e-96ce-1cf0ad85382a\") " Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.618603 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7204b63f-239d-4f0e-96ce-1cf0ad85382a-kube-api-access-6bv7p" (OuterVolumeSpecName: "kube-api-access-6bv7p") pod "7204b63f-239d-4f0e-96ce-1cf0ad85382a" (UID: "7204b63f-239d-4f0e-96ce-1cf0ad85382a"). InnerVolumeSpecName "kube-api-access-6bv7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.621989 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.659905 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.7602468980000001 podStartE2EDuration="12.659860733s" podCreationTimestamp="2025-11-26 07:15:10 +0000 UTC" firstStartedPulling="2025-11-26 07:15:11.339793522 +0000 UTC m=+1212.859935141" lastFinishedPulling="2025-11-26 07:15:22.239407357 +0000 UTC m=+1223.759548976" observedRunningTime="2025-11-26 07:15:22.65347933 +0000 UTC m=+1224.173620949" watchObservedRunningTime="2025-11-26 07:15:22.659860733 +0000 UTC m=+1224.180002352" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.719576 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-run-httpd\") pod \"cb1f11d1-eec4-40ad-8249-2800ef977b08\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.719643 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-log-httpd\") pod \"cb1f11d1-eec4-40ad-8249-2800ef977b08\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.719745 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-config-data\") pod \"cb1f11d1-eec4-40ad-8249-2800ef977b08\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.719769 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-combined-ca-bundle\") pod \"cb1f11d1-eec4-40ad-8249-2800ef977b08\" (UID: 
\"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.719796 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-sg-core-conf-yaml\") pod \"cb1f11d1-eec4-40ad-8249-2800ef977b08\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.719816 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dp5h\" (UniqueName: \"kubernetes.io/projected/cb1f11d1-eec4-40ad-8249-2800ef977b08-kube-api-access-4dp5h\") pod \"cb1f11d1-eec4-40ad-8249-2800ef977b08\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.719940 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-scripts\") pod \"cb1f11d1-eec4-40ad-8249-2800ef977b08\" (UID: \"cb1f11d1-eec4-40ad-8249-2800ef977b08\") " Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.720579 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cb1f11d1-eec4-40ad-8249-2800ef977b08" (UID: "cb1f11d1-eec4-40ad-8249-2800ef977b08"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.721284 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bv7p\" (UniqueName: \"kubernetes.io/projected/7204b63f-239d-4f0e-96ce-1cf0ad85382a-kube-api-access-6bv7p\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.721300 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.721373 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cb1f11d1-eec4-40ad-8249-2800ef977b08" (UID: "cb1f11d1-eec4-40ad-8249-2800ef977b08"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.725325 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb1f11d1-eec4-40ad-8249-2800ef977b08-kube-api-access-4dp5h" (OuterVolumeSpecName: "kube-api-access-4dp5h") pod "cb1f11d1-eec4-40ad-8249-2800ef977b08" (UID: "cb1f11d1-eec4-40ad-8249-2800ef977b08"). InnerVolumeSpecName "kube-api-access-4dp5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.731428 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-scripts" (OuterVolumeSpecName: "scripts") pod "cb1f11d1-eec4-40ad-8249-2800ef977b08" (UID: "cb1f11d1-eec4-40ad-8249-2800ef977b08"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.746079 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cb1f11d1-eec4-40ad-8249-2800ef977b08" (UID: "cb1f11d1-eec4-40ad-8249-2800ef977b08"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.822190 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb1f11d1-eec4-40ad-8249-2800ef977b08" (UID: "cb1f11d1-eec4-40ad-8249-2800ef977b08"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.823383 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.823412 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cb1f11d1-eec4-40ad-8249-2800ef977b08-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.823425 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.823438 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.823449 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dp5h\" (UniqueName: \"kubernetes.io/projected/cb1f11d1-eec4-40ad-8249-2800ef977b08-kube-api-access-4dp5h\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.860220 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-config-data" (OuterVolumeSpecName: "config-data") pod "cb1f11d1-eec4-40ad-8249-2800ef977b08" (UID: "cb1f11d1-eec4-40ad-8249-2800ef977b08"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.924874 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb1f11d1-eec4-40ad-8249-2800ef977b08-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.934105 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.951399 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.963697 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:15:22 crc kubenswrapper[4940]: E1126 07:15:22.964193 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="proxy-httpd" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964213 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="proxy-httpd" Nov 26 07:15:22 crc kubenswrapper[4940]: E1126 07:15:22.964238 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7204b63f-239d-4f0e-96ce-1cf0ad85382a" containerName="kube-state-metrics" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964247 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7204b63f-239d-4f0e-96ce-1cf0ad85382a" containerName="kube-state-metrics" Nov 26 07:15:22 crc kubenswrapper[4940]: E1126 07:15:22.964264 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="ceilometer-central-agent" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964272 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="ceilometer-central-agent" Nov 26 07:15:22 crc kubenswrapper[4940]: E1126 07:15:22.964290 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="sg-core" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964297 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="sg-core" Nov 26 07:15:22 crc kubenswrapper[4940]: E1126 07:15:22.964312 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="ceilometer-notification-agent" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964320 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="ceilometer-notification-agent" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964538 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="ceilometer-central-agent" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964560 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="proxy-httpd" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964586 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7204b63f-239d-4f0e-96ce-1cf0ad85382a" containerName="kube-state-metrics" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964599 4940 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="ceilometer-notification-agent" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.964611 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" containerName="sg-core" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.965336 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.970431 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.970881 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 26 07:15:22 crc kubenswrapper[4940]: I1126 07:15:22.974572 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.025984 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.026365 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9q82\" (UniqueName: \"kubernetes.io/projected/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-api-access-s9q82\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.026520 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.026598 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.127647 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.127705 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.127831 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.127866 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9q82\" (UniqueName: \"kubernetes.io/projected/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-api-access-s9q82\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.134821 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.135344 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.137457 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.149178 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9q82\" (UniqueName: \"kubernetes.io/projected/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-api-access-s9q82\") pod \"kube-state-metrics-0\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.177166 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7204b63f-239d-4f0e-96ce-1cf0ad85382a" path="/var/lib/kubelet/pods/7204b63f-239d-4f0e-96ce-1cf0ad85382a/volumes" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.288194 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.616126 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.618318 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cb1f11d1-eec4-40ad-8249-2800ef977b08","Type":"ContainerDied","Data":"0264dcfcfc9015366960f1683f7b1d499dc6d04584ef405654b3d1018cf27e51"} Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.619418 4940 scope.go:117] "RemoveContainer" containerID="b1625ffcb166a1e88167a4dbbbf8453100b92e8da85bee70c26bf2f20facfb63" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.652356 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.662358 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.668461 4940 scope.go:117] "RemoveContainer" containerID="90f372e19617297639de26fe21eb86f93a4ac333d8bd31103b6cd4c11f12608c" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.674056 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.676200 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.682676 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.682946 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.683263 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.696895 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.707852 4940 scope.go:117] "RemoveContainer" containerID="e5c491f5a6227127fb6763832393e74065c6bfda0b03329eb5cc0df21024c4cd" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.740994 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-log-httpd\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.741238 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.741331 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.741377 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-scripts\") pod \"ceilometer-0\" 
(UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.741394 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.741558 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmgp5\" (UniqueName: \"kubernetes.io/projected/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-kube-api-access-pmgp5\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.741592 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-config-data\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.741612 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-run-httpd\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.745246 4940 scope.go:117] "RemoveContainer" containerID="eafb1e6189953a02a55e573e94f8ea5ae9a8b5ff5c8516a2687e8a27c75a0baf" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.756095 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.843751 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.844228 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.844276 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-scripts\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.844312 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.844355 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmgp5\" (UniqueName: 
\"kubernetes.io/projected/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-kube-api-access-pmgp5\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.844386 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-config-data\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.844406 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-run-httpd\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.844437 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-log-httpd\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.844905 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-log-httpd\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.845325 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-run-httpd\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.850090 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.850243 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.850327 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.850347 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-config-data\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.853627 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-scripts\") pod 
\"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:23 crc kubenswrapper[4940]: I1126 07:15:23.867168 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmgp5\" (UniqueName: \"kubernetes.io/projected/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-kube-api-access-pmgp5\") pod \"ceilometer-0\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") " pod="openstack/ceilometer-0" Nov 26 07:15:24 crc kubenswrapper[4940]: I1126 07:15:24.000722 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:15:24 crc kubenswrapper[4940]: I1126 07:15:24.476574 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:24 crc kubenswrapper[4940]: W1126 07:15:24.477497 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8bdfbd0a_7100_46b7_a443_6c9ddbafb98f.slice/crio-f022f30d52394bdabd252a341f52e7753957dce0be1a3e665d0482182ca7338e WatchSource:0}: Error finding container f022f30d52394bdabd252a341f52e7753957dce0be1a3e665d0482182ca7338e: Status 404 returned error can't find the container with id f022f30d52394bdabd252a341f52e7753957dce0be1a3e665d0482182ca7338e Nov 26 07:15:24 crc kubenswrapper[4940]: I1126 07:15:24.625950 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3031ed6c-4ad3-4d47-a902-4a52bb40be6d","Type":"ContainerStarted","Data":"dc48ce55d4720539d72187d51174a10a64823a9c28973e4e5b3bfa0a2a2a6c23"} Nov 26 07:15:24 crc kubenswrapper[4940]: I1126 07:15:24.626024 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3031ed6c-4ad3-4d47-a902-4a52bb40be6d","Type":"ContainerStarted","Data":"892023edda3e99bb3d18e095752c48347362d756dd0436445985e339ced62864"} Nov 26 07:15:24 crc kubenswrapper[4940]: I1126 07:15:24.626248 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 07:15:24 crc kubenswrapper[4940]: I1126 07:15:24.627558 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerStarted","Data":"f022f30d52394bdabd252a341f52e7753957dce0be1a3e665d0482182ca7338e"} Nov 26 07:15:24 crc kubenswrapper[4940]: I1126 07:15:24.650578 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.271983821 podStartE2EDuration="2.650560378s" podCreationTimestamp="2025-11-26 07:15:22 +0000 UTC" firstStartedPulling="2025-11-26 07:15:23.771676374 +0000 UTC m=+1225.291817993" lastFinishedPulling="2025-11-26 07:15:24.150252931 +0000 UTC m=+1225.670394550" observedRunningTime="2025-11-26 07:15:24.640257812 +0000 UTC m=+1226.160399431" watchObservedRunningTime="2025-11-26 07:15:24.650560378 +0000 UTC m=+1226.170701997" Nov 26 07:15:25 crc kubenswrapper[4940]: I1126 07:15:25.178688 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb1f11d1-eec4-40ad-8249-2800ef977b08" path="/var/lib/kubelet/pods/cb1f11d1-eec4-40ad-8249-2800ef977b08/volumes" Nov 26 07:15:25 crc kubenswrapper[4940]: I1126 07:15:25.646099 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerStarted","Data":"b6e1c4625e97355a0fe0b8a594ce2614bec2811d433a4b978c8ba509446fe28f"} Nov 26 07:15:26 crc kubenswrapper[4940]: I1126 07:15:26.242849 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:26 crc kubenswrapper[4940]: I1126 07:15:26.668007 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerStarted","Data":"43a7762398dad13753c984843344a87476a120cf255f7b546b0b0863cc22a8be"} Nov 26 07:15:27 crc kubenswrapper[4940]: I1126 07:15:27.685592 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerStarted","Data":"accbdd0a87a62dcaa8a1369d852c0e0f498d0fcb0a19728b7f616bf21d8918c5"} Nov 26 07:15:28 crc kubenswrapper[4940]: I1126 07:15:28.694164 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerStarted","Data":"f53d0795cc850bda7285285cc6b6026c59a493da41e84bd627074b51135359b5"} Nov 26 07:15:28 crc kubenswrapper[4940]: I1126 07:15:28.694463 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:15:28 crc kubenswrapper[4940]: I1126 07:15:28.694345 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="sg-core" containerID="cri-o://accbdd0a87a62dcaa8a1369d852c0e0f498d0fcb0a19728b7f616bf21d8918c5" gracePeriod=30 Nov 26 07:15:28 crc kubenswrapper[4940]: I1126 07:15:28.694372 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="proxy-httpd" containerID="cri-o://f53d0795cc850bda7285285cc6b6026c59a493da41e84bd627074b51135359b5" gracePeriod=30 Nov 26 07:15:28 crc kubenswrapper[4940]: I1126 07:15:28.694409 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="ceilometer-notification-agent" containerID="cri-o://43a7762398dad13753c984843344a87476a120cf255f7b546b0b0863cc22a8be" gracePeriod=30 Nov 26 07:15:28 crc kubenswrapper[4940]: I1126 07:15:28.694285 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="ceilometer-central-agent" containerID="cri-o://b6e1c4625e97355a0fe0b8a594ce2614bec2811d433a4b978c8ba509446fe28f" gracePeriod=30 Nov 26 07:15:28 crc kubenswrapper[4940]: I1126 07:15:28.722882 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.451638505 podStartE2EDuration="5.722601346s" podCreationTimestamp="2025-11-26 07:15:23 +0000 UTC" firstStartedPulling="2025-11-26 07:15:24.480070791 +0000 UTC m=+1226.000212420" lastFinishedPulling="2025-11-26 07:15:27.751033642 +0000 UTC m=+1229.271175261" observedRunningTime="2025-11-26 07:15:28.71295772 +0000 UTC m=+1230.233099339" watchObservedRunningTime="2025-11-26 07:15:28.722601346 +0000 UTC m=+1230.242742965" Nov 26 07:15:29 crc kubenswrapper[4940]: I1126 07:15:29.708608 4940 generic.go:334] "Generic (PLEG): container finished" podID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" 
containerID="f53d0795cc850bda7285285cc6b6026c59a493da41e84bd627074b51135359b5" exitCode=0 Nov 26 07:15:29 crc kubenswrapper[4940]: I1126 07:15:29.708848 4940 generic.go:334] "Generic (PLEG): container finished" podID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerID="accbdd0a87a62dcaa8a1369d852c0e0f498d0fcb0a19728b7f616bf21d8918c5" exitCode=2 Nov 26 07:15:29 crc kubenswrapper[4940]: I1126 07:15:29.708857 4940 generic.go:334] "Generic (PLEG): container finished" podID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerID="43a7762398dad13753c984843344a87476a120cf255f7b546b0b0863cc22a8be" exitCode=0 Nov 26 07:15:29 crc kubenswrapper[4940]: I1126 07:15:29.708688 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerDied","Data":"f53d0795cc850bda7285285cc6b6026c59a493da41e84bd627074b51135359b5"} Nov 26 07:15:29 crc kubenswrapper[4940]: I1126 07:15:29.708892 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerDied","Data":"accbdd0a87a62dcaa8a1369d852c0e0f498d0fcb0a19728b7f616bf21d8918c5"} Nov 26 07:15:29 crc kubenswrapper[4940]: I1126 07:15:29.708906 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerDied","Data":"43a7762398dad13753c984843344a87476a120cf255f7b546b0b0863cc22a8be"} Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.617332 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-gb562"] Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.618780 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-gb562" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.654988 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-gb562"] Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.666815 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4fzq\" (UniqueName: \"kubernetes.io/projected/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-kube-api-access-h4fzq\") pod \"nova-api-db-create-gb562\" (UID: \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\") " pod="openstack/nova-api-db-create-gb562" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.666868 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-operator-scripts\") pod \"nova-api-db-create-gb562\" (UID: \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\") " pod="openstack/nova-api-db-create-gb562" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.706553 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-pp2g5"] Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.708022 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-pp2g5" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.714010 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pp2g5"] Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.768219 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-operator-scripts\") pod \"nova-cell0-db-create-pp2g5\" (UID: \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\") " pod="openstack/nova-cell0-db-create-pp2g5" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.768337 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2fnc\" (UniqueName: \"kubernetes.io/projected/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-kube-api-access-f2fnc\") pod \"nova-cell0-db-create-pp2g5\" (UID: \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\") " pod="openstack/nova-cell0-db-create-pp2g5" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.768379 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4fzq\" (UniqueName: \"kubernetes.io/projected/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-kube-api-access-h4fzq\") pod \"nova-api-db-create-gb562\" (UID: \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\") " pod="openstack/nova-api-db-create-gb562" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.768415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-operator-scripts\") pod \"nova-api-db-create-gb562\" (UID: \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\") " pod="openstack/nova-api-db-create-gb562" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.769062 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-operator-scripts\") pod \"nova-api-db-create-gb562\" (UID: \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\") " pod="openstack/nova-api-db-create-gb562" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.795594 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4fzq\" (UniqueName: \"kubernetes.io/projected/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-kube-api-access-h4fzq\") pod \"nova-api-db-create-gb562\" (UID: \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\") " pod="openstack/nova-api-db-create-gb562" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.804427 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-31fa-account-create-update-vxmcw"] Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.805888 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-31fa-account-create-update-vxmcw" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.811305 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.838544 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-31fa-account-create-update-vxmcw"] Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.870213 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2fnc\" (UniqueName: \"kubernetes.io/projected/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-kube-api-access-f2fnc\") pod \"nova-cell0-db-create-pp2g5\" (UID: \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\") " pod="openstack/nova-cell0-db-create-pp2g5" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.870283 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-operator-scripts\") pod \"nova-api-31fa-account-create-update-vxmcw\" (UID: \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\") " pod="openstack/nova-api-31fa-account-create-update-vxmcw" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.870397 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qh65\" (UniqueName: \"kubernetes.io/projected/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-kube-api-access-2qh65\") pod \"nova-api-31fa-account-create-update-vxmcw\" (UID: \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\") " pod="openstack/nova-api-31fa-account-create-update-vxmcw" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.870440 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-operator-scripts\") pod \"nova-cell0-db-create-pp2g5\" (UID: \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\") " pod="openstack/nova-cell0-db-create-pp2g5" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.871133 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-operator-scripts\") pod \"nova-cell0-db-create-pp2g5\" (UID: \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\") " pod="openstack/nova-cell0-db-create-pp2g5" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.890761 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2fnc\" (UniqueName: \"kubernetes.io/projected/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-kube-api-access-f2fnc\") pod \"nova-cell0-db-create-pp2g5\" (UID: \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\") " pod="openstack/nova-cell0-db-create-pp2g5" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.895232 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-bmbz8"] Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.897184 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bmbz8" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.904754 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-bmbz8"] Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.957535 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-gb562" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.975190 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qh65\" (UniqueName: \"kubernetes.io/projected/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-kube-api-access-2qh65\") pod \"nova-api-31fa-account-create-update-vxmcw\" (UID: \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\") " pod="openstack/nova-api-31fa-account-create-update-vxmcw" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.975248 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks8r9\" (UniqueName: \"kubernetes.io/projected/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-kube-api-access-ks8r9\") pod \"nova-cell1-db-create-bmbz8\" (UID: \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\") " pod="openstack/nova-cell1-db-create-bmbz8" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.975295 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-operator-scripts\") pod \"nova-cell1-db-create-bmbz8\" (UID: \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\") " pod="openstack/nova-cell1-db-create-bmbz8" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.975360 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-operator-scripts\") pod \"nova-api-31fa-account-create-update-vxmcw\" (UID: \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\") " pod="openstack/nova-api-31fa-account-create-update-vxmcw" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.975990 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-operator-scripts\") pod \"nova-api-31fa-account-create-update-vxmcw\" (UID: \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\") " pod="openstack/nova-api-31fa-account-create-update-vxmcw" Nov 26 07:15:30 crc kubenswrapper[4940]: I1126 07:15:30.992579 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qh65\" (UniqueName: \"kubernetes.io/projected/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-kube-api-access-2qh65\") pod \"nova-api-31fa-account-create-update-vxmcw\" (UID: \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\") " pod="openstack/nova-api-31fa-account-create-update-vxmcw" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.040651 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-ee23-account-create-update-j7nlg"] Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.042855 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ee23-account-create-update-j7nlg"] Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.042943 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.046086 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-pp2g5" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.047130 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.076481 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39eba712-649b-4509-be2d-72bb08e292e5-operator-scripts\") pod \"nova-cell0-ee23-account-create-update-j7nlg\" (UID: \"39eba712-649b-4509-be2d-72bb08e292e5\") " pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.076575 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks8r9\" (UniqueName: \"kubernetes.io/projected/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-kube-api-access-ks8r9\") pod \"nova-cell1-db-create-bmbz8\" (UID: \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\") " pod="openstack/nova-cell1-db-create-bmbz8" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.076611 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4vgm\" (UniqueName: \"kubernetes.io/projected/39eba712-649b-4509-be2d-72bb08e292e5-kube-api-access-k4vgm\") pod \"nova-cell0-ee23-account-create-update-j7nlg\" (UID: \"39eba712-649b-4509-be2d-72bb08e292e5\") " pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.076683 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-operator-scripts\") pod \"nova-cell1-db-create-bmbz8\" (UID: \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\") " pod="openstack/nova-cell1-db-create-bmbz8" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.078325 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-operator-scripts\") pod \"nova-cell1-db-create-bmbz8\" (UID: \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\") " pod="openstack/nova-cell1-db-create-bmbz8" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.096751 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks8r9\" (UniqueName: \"kubernetes.io/projected/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-kube-api-access-ks8r9\") pod \"nova-cell1-db-create-bmbz8\" (UID: \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\") " pod="openstack/nova-cell1-db-create-bmbz8" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.158855 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-31fa-account-create-update-vxmcw" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.183357 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39eba712-649b-4509-be2d-72bb08e292e5-operator-scripts\") pod \"nova-cell0-ee23-account-create-update-j7nlg\" (UID: \"39eba712-649b-4509-be2d-72bb08e292e5\") " pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.183422 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4vgm\" (UniqueName: \"kubernetes.io/projected/39eba712-649b-4509-be2d-72bb08e292e5-kube-api-access-k4vgm\") pod \"nova-cell0-ee23-account-create-update-j7nlg\" (UID: \"39eba712-649b-4509-be2d-72bb08e292e5\") " pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.184386 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39eba712-649b-4509-be2d-72bb08e292e5-operator-scripts\") pod \"nova-cell0-ee23-account-create-update-j7nlg\" (UID: \"39eba712-649b-4509-be2d-72bb08e292e5\") " pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.224958 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4vgm\" (UniqueName: \"kubernetes.io/projected/39eba712-649b-4509-be2d-72bb08e292e5-kube-api-access-k4vgm\") pod \"nova-cell0-ee23-account-create-update-j7nlg\" (UID: \"39eba712-649b-4509-be2d-72bb08e292e5\") " pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.238330 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-d6fe-account-create-update-v2428"] Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.240217 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bmbz8" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.242274 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-d6fe-account-create-update-v2428" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.250127 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.254828 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d6fe-account-create-update-v2428"] Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.285550 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/742ee5ec-ca7e-47f2-becd-5352810d275f-operator-scripts\") pod \"nova-cell1-d6fe-account-create-update-v2428\" (UID: \"742ee5ec-ca7e-47f2-becd-5352810d275f\") " pod="openstack/nova-cell1-d6fe-account-create-update-v2428" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.286484 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9p7j\" (UniqueName: \"kubernetes.io/projected/742ee5ec-ca7e-47f2-becd-5352810d275f-kube-api-access-r9p7j\") pod \"nova-cell1-d6fe-account-create-update-v2428\" (UID: \"742ee5ec-ca7e-47f2-becd-5352810d275f\") " pod="openstack/nova-cell1-d6fe-account-create-update-v2428" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.387758 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9p7j\" (UniqueName: \"kubernetes.io/projected/742ee5ec-ca7e-47f2-becd-5352810d275f-kube-api-access-r9p7j\") pod \"nova-cell1-d6fe-account-create-update-v2428\" (UID: \"742ee5ec-ca7e-47f2-becd-5352810d275f\") " pod="openstack/nova-cell1-d6fe-account-create-update-v2428" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.388145 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/742ee5ec-ca7e-47f2-becd-5352810d275f-operator-scripts\") pod \"nova-cell1-d6fe-account-create-update-v2428\" (UID: \"742ee5ec-ca7e-47f2-becd-5352810d275f\") " pod="openstack/nova-cell1-d6fe-account-create-update-v2428" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.388856 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/742ee5ec-ca7e-47f2-becd-5352810d275f-operator-scripts\") pod \"nova-cell1-d6fe-account-create-update-v2428\" (UID: \"742ee5ec-ca7e-47f2-becd-5352810d275f\") " pod="openstack/nova-cell1-d6fe-account-create-update-v2428" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.404552 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9p7j\" (UniqueName: \"kubernetes.io/projected/742ee5ec-ca7e-47f2-becd-5352810d275f-kube-api-access-r9p7j\") pod \"nova-cell1-d6fe-account-create-update-v2428\" (UID: \"742ee5ec-ca7e-47f2-becd-5352810d275f\") " pod="openstack/nova-cell1-d6fe-account-create-update-v2428" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.415946 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.534357 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-gb562"] Nov 26 07:15:31 crc kubenswrapper[4940]: W1126 07:15:31.559914 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fa5aea3_ec9f_41d6_b1e7_19912ab7c20b.slice/crio-eec88fb839a88d9d429499a00a34990e628fa6af9d44149681700cd1a6fa6914 WatchSource:0}: Error finding container eec88fb839a88d9d429499a00a34990e628fa6af9d44149681700cd1a6fa6914: Status 404 returned error can't find the container with id eec88fb839a88d9d429499a00a34990e628fa6af9d44149681700cd1a6fa6914 Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.571801 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d6fe-account-create-update-v2428" Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.603768 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pp2g5"] Nov 26 07:15:31 crc kubenswrapper[4940]: W1126 07:15:31.613861 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f9f3ed0_d8b5_41b4_be9a_982f5f8cfde1.slice/crio-3b7646bd5977a34a4f27458fd0cd6f389b658ea3153d29e349f0a8d64cd9bd2c WatchSource:0}: Error finding container 3b7646bd5977a34a4f27458fd0cd6f389b658ea3153d29e349f0a8d64cd9bd2c: Status 404 returned error can't find the container with id 3b7646bd5977a34a4f27458fd0cd6f389b658ea3153d29e349f0a8d64cd9bd2c Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.700362 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-31fa-account-create-update-vxmcw"] Nov 26 07:15:31 crc kubenswrapper[4940]: W1126 07:15:31.720425 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95b33e62_98cb_4ebe_8fa9_e1e762ee3352.slice/crio-d3a4c51fb8f023c84ea6094a363512ab12eaf9ded150edf2b232c962830fcb75 WatchSource:0}: Error finding container d3a4c51fb8f023c84ea6094a363512ab12eaf9ded150edf2b232c962830fcb75: Status 404 returned error can't find the container with id d3a4c51fb8f023c84ea6094a363512ab12eaf9ded150edf2b232c962830fcb75 Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.747652 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-gb562" event={"ID":"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b","Type":"ContainerStarted","Data":"eec88fb839a88d9d429499a00a34990e628fa6af9d44149681700cd1a6fa6914"} Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.749382 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pp2g5" event={"ID":"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1","Type":"ContainerStarted","Data":"3b7646bd5977a34a4f27458fd0cd6f389b658ea3153d29e349f0a8d64cd9bd2c"} Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.756375 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-31fa-account-create-update-vxmcw" event={"ID":"95b33e62-98cb-4ebe-8fa9-e1e762ee3352","Type":"ContainerStarted","Data":"d3a4c51fb8f023c84ea6094a363512ab12eaf9ded150edf2b232c962830fcb75"} Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.831693 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-bmbz8"] Nov 26 07:15:31 crc 
kubenswrapper[4940]: W1126 07:15:31.839164 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3d6fb0b_a20a_41c3_ba0b_e3cdcc8bebc8.slice/crio-b04a7d3cb7942f8062b9e72013aed2b10fd87adf70385de223194a7bbb71f242 WatchSource:0}: Error finding container b04a7d3cb7942f8062b9e72013aed2b10fd87adf70385de223194a7bbb71f242: Status 404 returned error can't find the container with id b04a7d3cb7942f8062b9e72013aed2b10fd87adf70385de223194a7bbb71f242
Nov 26 07:15:31 crc kubenswrapper[4940]: I1126 07:15:31.916534 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ee23-account-create-update-j7nlg"]
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.089940 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d6fe-account-create-update-v2428"]
Nov 26 07:15:32 crc kubenswrapper[4940]: W1126 07:15:32.104813 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod742ee5ec_ca7e_47f2_becd_5352810d275f.slice/crio-f9bd24e02b520124be4a43c202a621cc601c8a78d9ffc2ee990a19e2c4180046 WatchSource:0}: Error finding container f9bd24e02b520124be4a43c202a621cc601c8a78d9ffc2ee990a19e2c4180046: Status 404 returned error can't find the container with id f9bd24e02b520124be4a43c202a621cc601c8a78d9ffc2ee990a19e2c4180046
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.767108 4940 generic.go:334] "Generic (PLEG): container finished" podID="95b33e62-98cb-4ebe-8fa9-e1e762ee3352" containerID="9a08f7c810165e49dfe9ccdca40dca17e1157ad42f3c6e09fe28969dda99fecd" exitCode=0
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.767160 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-31fa-account-create-update-vxmcw" event={"ID":"95b33e62-98cb-4ebe-8fa9-e1e762ee3352","Type":"ContainerDied","Data":"9a08f7c810165e49dfe9ccdca40dca17e1157ad42f3c6e09fe28969dda99fecd"}
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.769920 4940 generic.go:334] "Generic (PLEG): container finished" podID="742ee5ec-ca7e-47f2-becd-5352810d275f" containerID="702669136ea3ef57ecdc9f743c124a1da4686d32e188db2479f4eac18212047b" exitCode=0
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.769983 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d6fe-account-create-update-v2428" event={"ID":"742ee5ec-ca7e-47f2-becd-5352810d275f","Type":"ContainerDied","Data":"702669136ea3ef57ecdc9f743c124a1da4686d32e188db2479f4eac18212047b"}
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.770009 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d6fe-account-create-update-v2428" event={"ID":"742ee5ec-ca7e-47f2-becd-5352810d275f","Type":"ContainerStarted","Data":"f9bd24e02b520124be4a43c202a621cc601c8a78d9ffc2ee990a19e2c4180046"}
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.772176 4940 generic.go:334] "Generic (PLEG): container finished" podID="f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8" containerID="b65703564f9318a7bf88f52111da348fe0e8f2f2747fbd28dcc03006797af0bc" exitCode=0
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.772230 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bmbz8" event={"ID":"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8","Type":"ContainerDied","Data":"b65703564f9318a7bf88f52111da348fe0e8f2f2747fbd28dcc03006797af0bc"}
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.772251 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bmbz8" event={"ID":"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8","Type":"ContainerStarted","Data":"b04a7d3cb7942f8062b9e72013aed2b10fd87adf70385de223194a7bbb71f242"}
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.774143 4940 generic.go:334] "Generic (PLEG): container finished" podID="2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b" containerID="d498985319cb68f63f891777bc19a7745369f745cdaae2d1da4234f423bbc721" exitCode=0
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.774241 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-gb562" event={"ID":"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b","Type":"ContainerDied","Data":"d498985319cb68f63f891777bc19a7745369f745cdaae2d1da4234f423bbc721"}
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.776846 4940 generic.go:334] "Generic (PLEG): container finished" podID="1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1" containerID="d72cd9ac728cbf3b30106e379fabf798c41b42cb28d594cc7033ef0b760e6a02" exitCode=0
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.776916 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pp2g5" event={"ID":"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1","Type":"ContainerDied","Data":"d72cd9ac728cbf3b30106e379fabf798c41b42cb28d594cc7033ef0b760e6a02"}
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.778518 4940 generic.go:334] "Generic (PLEG): container finished" podID="39eba712-649b-4509-be2d-72bb08e292e5" containerID="e5fa80963950fb43968ab06d391a280351e421fdee0cb75e947fe3346d50eda0" exitCode=0
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.778565 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" event={"ID":"39eba712-649b-4509-be2d-72bb08e292e5","Type":"ContainerDied","Data":"e5fa80963950fb43968ab06d391a280351e421fdee0cb75e947fe3346d50eda0"}
Nov 26 07:15:32 crc kubenswrapper[4940]: I1126 07:15:32.778597 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" event={"ID":"39eba712-649b-4509-be2d-72bb08e292e5","Type":"ContainerStarted","Data":"1438885abc72258e32ff8a64d0f315ab63b633324177c52f79cbf3a3b0a077c4"}
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.302650 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.475614 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.475987 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerName="glance-log" containerID="cri-o://3ffa118f5f527489dc345765e83342efe059f394a1846bd7fa6ddd85e3436876" gracePeriod=30
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.476229 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerName="glance-httpd" containerID="cri-o://c699e8ed09c1b1959948a2a83983b2a73362370ad13942383ea39dc8e7bc31d5" gracePeriod=30
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.791345 4940 generic.go:334] "Generic (PLEG): container finished" podID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerID="3ffa118f5f527489dc345765e83342efe059f394a1846bd7fa6ddd85e3436876" exitCode=143
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.791466 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8682f23a-0622-456d-b9ca-59ffb9fdad24","Type":"ContainerDied","Data":"3ffa118f5f527489dc345765e83342efe059f394a1846bd7fa6ddd85e3436876"}
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.796239 4940 generic.go:334] "Generic (PLEG): container finished" podID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerID="b6e1c4625e97355a0fe0b8a594ce2614bec2811d433a4b978c8ba509446fe28f" exitCode=0
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.796360 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerDied","Data":"b6e1c4625e97355a0fe0b8a594ce2614bec2811d433a4b978c8ba509446fe28f"}
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.796414 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f","Type":"ContainerDied","Data":"f022f30d52394bdabd252a341f52e7753957dce0be1a3e665d0482182ca7338e"}
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.796433 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f022f30d52394bdabd252a341f52e7753957dce0be1a3e665d0482182ca7338e"
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.848364 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.944083 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-log-httpd\") pod \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") "
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.944133 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmgp5\" (UniqueName: \"kubernetes.io/projected/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-kube-api-access-pmgp5\") pod \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") "
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.944234 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-run-httpd\") pod \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") "
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.944482 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" (UID: "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.944538 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" (UID: "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.944575 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-sg-core-conf-yaml\") pod \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") "
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.945170 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-ceilometer-tls-certs\") pod \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") "
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.945212 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-scripts\") pod \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") "
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.945230 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-config-data\") pod \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") "
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.945290 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-combined-ca-bundle\") pod \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\" (UID: \"8bdfbd0a-7100-46b7-a443-6c9ddbafb98f\") "
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.945767 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.945784 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.950121 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-scripts" (OuterVolumeSpecName: "scripts") pod "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" (UID: "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.951310 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-kube-api-access-pmgp5" (OuterVolumeSpecName: "kube-api-access-pmgp5") pod "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" (UID: "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f"). InnerVolumeSpecName "kube-api-access-pmgp5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:33 crc kubenswrapper[4940]: I1126 07:15:33.976773 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" (UID: "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.009792 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" (UID: "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.047335 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmgp5\" (UniqueName: \"kubernetes.io/projected/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-kube-api-access-pmgp5\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.047369 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.047378 4940 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.047388 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.055986 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-config-data" (OuterVolumeSpecName: "config-data") pod "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" (UID: "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.057994 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" (UID: "8bdfbd0a-7100-46b7-a443-6c9ddbafb98f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.149555 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.149586 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.312006 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d6fe-account-create-update-v2428"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.319380 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bmbz8"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.327333 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-31fa-account-create-update-vxmcw"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.341635 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pp2g5"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.353468 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qh65\" (UniqueName: \"kubernetes.io/projected/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-kube-api-access-2qh65\") pod \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\" (UID: \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.353627 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-operator-scripts\") pod \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\" (UID: \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.353788 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/742ee5ec-ca7e-47f2-becd-5352810d275f-operator-scripts\") pod \"742ee5ec-ca7e-47f2-becd-5352810d275f\" (UID: \"742ee5ec-ca7e-47f2-becd-5352810d275f\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.353804 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9p7j\" (UniqueName: \"kubernetes.io/projected/742ee5ec-ca7e-47f2-becd-5352810d275f-kube-api-access-r9p7j\") pod \"742ee5ec-ca7e-47f2-becd-5352810d275f\" (UID: \"742ee5ec-ca7e-47f2-becd-5352810d275f\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.353853 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-operator-scripts\") pod \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\" (UID: \"95b33e62-98cb-4ebe-8fa9-e1e762ee3352\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.353877 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ks8r9\" (UniqueName: \"kubernetes.io/projected/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-kube-api-access-ks8r9\") pod \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\" (UID: \"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.354189 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-gb562"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.355386 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8" (UID: "f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.355530 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/742ee5ec-ca7e-47f2-becd-5352810d275f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "742ee5ec-ca7e-47f2-becd-5352810d275f" (UID: "742ee5ec-ca7e-47f2-becd-5352810d275f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.355980 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "95b33e62-98cb-4ebe-8fa9-e1e762ee3352" (UID: "95b33e62-98cb-4ebe-8fa9-e1e762ee3352"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.361752 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-kube-api-access-ks8r9" (OuterVolumeSpecName: "kube-api-access-ks8r9") pod "f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8" (UID: "f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8"). InnerVolumeSpecName "kube-api-access-ks8r9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.361823 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/742ee5ec-ca7e-47f2-becd-5352810d275f-kube-api-access-r9p7j" (OuterVolumeSpecName: "kube-api-access-r9p7j") pod "742ee5ec-ca7e-47f2-becd-5352810d275f" (UID: "742ee5ec-ca7e-47f2-becd-5352810d275f"). InnerVolumeSpecName "kube-api-access-r9p7j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.364279 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-kube-api-access-2qh65" (OuterVolumeSpecName: "kube-api-access-2qh65") pod "95b33e62-98cb-4ebe-8fa9-e1e762ee3352" (UID: "95b33e62-98cb-4ebe-8fa9-e1e762ee3352"). InnerVolumeSpecName "kube-api-access-2qh65". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.392382 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ee23-account-create-update-j7nlg"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.455006 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4vgm\" (UniqueName: \"kubernetes.io/projected/39eba712-649b-4509-be2d-72bb08e292e5-kube-api-access-k4vgm\") pod \"39eba712-649b-4509-be2d-72bb08e292e5\" (UID: \"39eba712-649b-4509-be2d-72bb08e292e5\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.455141 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2fnc\" (UniqueName: \"kubernetes.io/projected/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-kube-api-access-f2fnc\") pod \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\" (UID: \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.455176 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-operator-scripts\") pod \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\" (UID: \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.455198 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-operator-scripts\") pod \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\" (UID: \"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.455272 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4fzq\" (UniqueName: \"kubernetes.io/projected/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-kube-api-access-h4fzq\") pod \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\" (UID: \"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.455350 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39eba712-649b-4509-be2d-72bb08e292e5-operator-scripts\") pod \"39eba712-649b-4509-be2d-72bb08e292e5\" (UID: \"39eba712-649b-4509-be2d-72bb08e292e5\") "
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.455589 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b" (UID: "2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.455621 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1" (UID: "1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456074 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39eba712-649b-4509-be2d-72bb08e292e5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "39eba712-649b-4509-be2d-72bb08e292e5" (UID: "39eba712-649b-4509-be2d-72bb08e292e5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456468 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/742ee5ec-ca7e-47f2-becd-5352810d275f-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456487 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9p7j\" (UniqueName: \"kubernetes.io/projected/742ee5ec-ca7e-47f2-becd-5352810d275f-kube-api-access-r9p7j\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456498 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456507 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456515 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456523 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ks8r9\" (UniqueName: \"kubernetes.io/projected/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-kube-api-access-ks8r9\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456531 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qh65\" (UniqueName: \"kubernetes.io/projected/95b33e62-98cb-4ebe-8fa9-e1e762ee3352-kube-api-access-2qh65\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456539 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.456547 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39eba712-649b-4509-be2d-72bb08e292e5-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.460199 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39eba712-649b-4509-be2d-72bb08e292e5-kube-api-access-k4vgm" (OuterVolumeSpecName: "kube-api-access-k4vgm") pod "39eba712-649b-4509-be2d-72bb08e292e5" (UID: "39eba712-649b-4509-be2d-72bb08e292e5"). InnerVolumeSpecName "kube-api-access-k4vgm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.460232 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-kube-api-access-f2fnc" (OuterVolumeSpecName: "kube-api-access-f2fnc") pod "1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1" (UID: "1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1"). InnerVolumeSpecName "kube-api-access-f2fnc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.461288 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-kube-api-access-h4fzq" (OuterVolumeSpecName: "kube-api-access-h4fzq") pod "2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b" (UID: "2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b"). InnerVolumeSpecName "kube-api-access-h4fzq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.558395 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4vgm\" (UniqueName: \"kubernetes.io/projected/39eba712-649b-4509-be2d-72bb08e292e5-kube-api-access-k4vgm\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.558431 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2fnc\" (UniqueName: \"kubernetes.io/projected/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1-kube-api-access-f2fnc\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.558440 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4fzq\" (UniqueName: \"kubernetes.io/projected/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b-kube-api-access-h4fzq\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.807790 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-bmbz8"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.807752 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-bmbz8" event={"ID":"f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8","Type":"ContainerDied","Data":"b04a7d3cb7942f8062b9e72013aed2b10fd87adf70385de223194a7bbb71f242"}
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.808267 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b04a7d3cb7942f8062b9e72013aed2b10fd87adf70385de223194a7bbb71f242"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.809307 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-gb562" event={"ID":"2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b","Type":"ContainerDied","Data":"eec88fb839a88d9d429499a00a34990e628fa6af9d44149681700cd1a6fa6914"}
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.809340 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eec88fb839a88d9d429499a00a34990e628fa6af9d44149681700cd1a6fa6914"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.809341 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-gb562"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.811532 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pp2g5" event={"ID":"1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1","Type":"ContainerDied","Data":"3b7646bd5977a34a4f27458fd0cd6f389b658ea3153d29e349f0a8d64cd9bd2c"}
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.811582 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b7646bd5977a34a4f27458fd0cd6f389b658ea3153d29e349f0a8d64cd9bd2c"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.811555 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pp2g5"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.813393 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ee23-account-create-update-j7nlg" event={"ID":"39eba712-649b-4509-be2d-72bb08e292e5","Type":"ContainerDied","Data":"1438885abc72258e32ff8a64d0f315ab63b633324177c52f79cbf3a3b0a077c4"}
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.813427 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1438885abc72258e32ff8a64d0f315ab63b633324177c52f79cbf3a3b0a077c4"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.813488 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ee23-account-create-update-j7nlg"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.823224 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-31fa-account-create-update-vxmcw" event={"ID":"95b33e62-98cb-4ebe-8fa9-e1e762ee3352","Type":"ContainerDied","Data":"d3a4c51fb8f023c84ea6094a363512ab12eaf9ded150edf2b232c962830fcb75"}
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.823302 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3a4c51fb8f023c84ea6094a363512ab12eaf9ded150edf2b232c962830fcb75"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.823265 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-31fa-account-create-update-vxmcw"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.825568 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.825593 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d6fe-account-create-update-v2428"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.825631 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d6fe-account-create-update-v2428" event={"ID":"742ee5ec-ca7e-47f2-becd-5352810d275f","Type":"ContainerDied","Data":"f9bd24e02b520124be4a43c202a621cc601c8a78d9ffc2ee990a19e2c4180046"}
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.825681 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9bd24e02b520124be4a43c202a621cc601c8a78d9ffc2ee990a19e2c4180046"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.887048 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.902320 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.908990 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909487 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="742ee5ec-ca7e-47f2-becd-5352810d275f" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909515 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="742ee5ec-ca7e-47f2-becd-5352810d275f" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909533 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="ceilometer-notification-agent"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909544 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="ceilometer-notification-agent"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909568 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="ceilometer-central-agent"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909577 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="ceilometer-central-agent"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909595 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909603 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909616 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95b33e62-98cb-4ebe-8fa9-e1e762ee3352" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909626 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="95b33e62-98cb-4ebe-8fa9-e1e762ee3352" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909646 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909655 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909671 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909679 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909694 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="proxy-httpd"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909706 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="proxy-httpd"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909720 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="sg-core"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909730 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="sg-core"
Nov 26 07:15:34 crc kubenswrapper[4940]: E1126 07:15:34.909744 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39eba712-649b-4509-be2d-72bb08e292e5" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909752 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="39eba712-649b-4509-be2d-72bb08e292e5" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909961 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="ceilometer-central-agent"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.909985 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="ceilometer-notification-agent"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.910004 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.910016 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.910024 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b" containerName="mariadb-database-create"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.910033 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="95b33e62-98cb-4ebe-8fa9-e1e762ee3352" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.910055 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="742ee5ec-ca7e-47f2-becd-5352810d275f" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.910081 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="sg-core"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.910095 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" containerName="proxy-httpd"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.910106 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="39eba712-649b-4509-be2d-72bb08e292e5" containerName="mariadb-account-create-update"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.913159 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.915534 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.915884 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.916190 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.919890 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.965985 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8pnt\" (UniqueName: \"kubernetes.io/projected/488524db-1b89-4a01-b162-4c7fcd2aac3b-kube-api-access-p8pnt\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.966041 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-scripts\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.966119 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-run-httpd\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.966152 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-log-httpd\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.966189 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.966220 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.966246 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-config-data\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:34 crc kubenswrapper[4940]: I1126 07:15:34.966326 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.067537 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.067582 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-config-data\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.067655 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.067696 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8pnt\" (UniqueName: \"kubernetes.io/projected/488524db-1b89-4a01-b162-4c7fcd2aac3b-kube-api-access-p8pnt\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.067718 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-scripts\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.067764 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-run-httpd\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.067789 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-log-httpd\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.067814 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.068550 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-run-httpd\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.068615 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-log-httpd\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.071140 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.071267 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.072453 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-scripts\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.073230 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-config-data\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.074116 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.083824 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8pnt\" (UniqueName: \"kubernetes.io/projected/488524db-1b89-4a01-b162-4c7fcd2aac3b-kube-api-access-p8pnt\") pod \"ceilometer-0\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.176617 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bdfbd0a-7100-46b7-a443-6c9ddbafb98f" path="/var/lib/kubelet/pods/8bdfbd0a-7100-46b7-a443-6c9ddbafb98f/volumes"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.326063 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:15:35 crc kubenswrapper[4940]: I1126 07:15:35.975160 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:15:35 crc kubenswrapper[4940]: W1126 07:15:35.978218 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod488524db_1b89_4a01_b162_4c7fcd2aac3b.slice/crio-83ff2f8747059d1ff5febe0b5aac7bffe28377929bfc3ee82c5dce6bcf4e9998 WatchSource:0}: Error finding container 83ff2f8747059d1ff5febe0b5aac7bffe28377929bfc3ee82c5dce6bcf4e9998: Status 404 returned error can't find the container with id 83ff2f8747059d1ff5febe0b5aac7bffe28377929bfc3ee82c5dce6bcf4e9998
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.189334 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.189554 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="058d948e-a28a-43f1-91f6-d310742359ef" containerName="glance-log" containerID="cri-o://8a1fcf1dd9e392bbbd9e6281fa487af8c1811e6f62a0ab8572b68bcaad1323ba" gracePeriod=30
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.189818 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="058d948e-a28a-43f1-91f6-d310742359ef" containerName="glance-httpd" containerID="cri-o://51ad24223d851461f35281c479e625834cc22512617bb363aa06d8deb4fd3260" gracePeriod=30
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.204924 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bf4g9"]
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.206027 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.208329 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-xx9q7"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.208511 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.209298 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.212284 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bf4g9"]
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.290252 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-scripts\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.290633 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwzws\" (UniqueName: \"kubernetes.io/projected/b4735f75-b279-4137-9496-36fd315fa8e8-kube-api-access-lwzws\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.290653 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.290717 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-config-data\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.392434 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwzws\" (UniqueName: \"kubernetes.io/projected/b4735f75-b279-4137-9496-36fd315fa8e8-kube-api-access-lwzws\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.392488 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.392516 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-config-data\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.392658 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-scripts\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.399649 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-scripts\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.400545 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.402662 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-config-data\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.418188 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwzws\" (UniqueName: \"kubernetes.io/projected/b4735f75-b279-4137-9496-36fd315fa8e8-kube-api-access-lwzws\") pod \"nova-cell0-conductor-db-sync-bf4g9\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.528716 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-bf4g9"
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.847409 4940 generic.go:334] "Generic (PLEG): container finished" podID="058d948e-a28a-43f1-91f6-d310742359ef" containerID="8a1fcf1dd9e392bbbd9e6281fa487af8c1811e6f62a0ab8572b68bcaad1323ba" exitCode=143
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.847549 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"058d948e-a28a-43f1-91f6-d310742359ef","Type":"ContainerDied","Data":"8a1fcf1dd9e392bbbd9e6281fa487af8c1811e6f62a0ab8572b68bcaad1323ba"}
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.851410 4940 generic.go:334] "Generic (PLEG): container finished" podID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerID="c699e8ed09c1b1959948a2a83983b2a73362370ad13942383ea39dc8e7bc31d5" exitCode=0
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.851507 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8682f23a-0622-456d-b9ca-59ffb9fdad24","Type":"ContainerDied","Data":"c699e8ed09c1b1959948a2a83983b2a73362370ad13942383ea39dc8e7bc31d5"}
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.853182 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerStarted","Data":"3b6aed4176b31bda53c9a4ce5995546351eb5863484f4b82c81190dcddab67a8"}
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.853246 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerStarted","Data":"83ff2f8747059d1ff5febe0b5aac7bffe28377929bfc3ee82c5dce6bcf4e9998"}
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.935525 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:15:36 crc kubenswrapper[4940]: I1126 07:15:36.973883 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bf4g9"]
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.160334 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.213080 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-config-data\") pod \"8682f23a-0622-456d-b9ca-59ffb9fdad24\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.213149 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-scripts\") pod \"8682f23a-0622-456d-b9ca-59ffb9fdad24\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.213187 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnczv\" (UniqueName: \"kubernetes.io/projected/8682f23a-0622-456d-b9ca-59ffb9fdad24-kube-api-access-vnczv\") pod \"8682f23a-0622-456d-b9ca-59ffb9fdad24\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.213246 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-public-tls-certs\") pod \"8682f23a-0622-456d-b9ca-59ffb9fdad24\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.214330 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-combined-ca-bundle\") pod \"8682f23a-0622-456d-b9ca-59ffb9fdad24\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.214424 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-httpd-run\") pod \"8682f23a-0622-456d-b9ca-59ffb9fdad24\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.214494 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"8682f23a-0622-456d-b9ca-59ffb9fdad24\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.214568 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-logs\") pod \"8682f23a-0622-456d-b9ca-59ffb9fdad24\" (UID: \"8682f23a-0622-456d-b9ca-59ffb9fdad24\") "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.214906 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8682f23a-0622-456d-b9ca-59ffb9fdad24" (UID: "8682f23a-0622-456d-b9ca-59ffb9fdad24"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.215417 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.216168 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-logs" (OuterVolumeSpecName: "logs") pod "8682f23a-0622-456d-b9ca-59ffb9fdad24" (UID: "8682f23a-0622-456d-b9ca-59ffb9fdad24"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.247423 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8682f23a-0622-456d-b9ca-59ffb9fdad24-kube-api-access-vnczv" (OuterVolumeSpecName: "kube-api-access-vnczv") pod "8682f23a-0622-456d-b9ca-59ffb9fdad24" (UID: "8682f23a-0622-456d-b9ca-59ffb9fdad24"). InnerVolumeSpecName "kube-api-access-vnczv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.247516 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-scripts" (OuterVolumeSpecName: "scripts") pod "8682f23a-0622-456d-b9ca-59ffb9fdad24" (UID: "8682f23a-0622-456d-b9ca-59ffb9fdad24"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.250189 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "8682f23a-0622-456d-b9ca-59ffb9fdad24" (UID: "8682f23a-0622-456d-b9ca-59ffb9fdad24"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.266124 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8682f23a-0622-456d-b9ca-59ffb9fdad24" (UID: "8682f23a-0622-456d-b9ca-59ffb9fdad24"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.290522 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-config-data" (OuterVolumeSpecName: "config-data") pod "8682f23a-0622-456d-b9ca-59ffb9fdad24" (UID: "8682f23a-0622-456d-b9ca-59ffb9fdad24"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.317120 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8682f23a-0622-456d-b9ca-59ffb9fdad24" (UID: "8682f23a-0622-456d-b9ca-59ffb9fdad24"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.317689 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.317723 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8682f23a-0622-456d-b9ca-59ffb9fdad24-logs\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.317737 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.317748 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.317774 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnczv\" (UniqueName: \"kubernetes.io/projected/8682f23a-0622-456d-b9ca-59ffb9fdad24-kube-api-access-vnczv\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.317788 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.317798 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8682f23a-0622-456d-b9ca-59ffb9fdad24-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.341141 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc"
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.418957 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.868635 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-bf4g9" event={"ID":"b4735f75-b279-4137-9496-36fd315fa8e8","Type":"ContainerStarted","Data":"7ee4feb68bdc5c33eee39bca202cf71c97f342e4900163b57e17f573d49616ef"}
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.871946 4940 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.871944 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8682f23a-0622-456d-b9ca-59ffb9fdad24","Type":"ContainerDied","Data":"9bcb321aa1700ff6b6d7c386295b1738e703ed0628eaef16b93b6a4945efcba0"}
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.872189 4940 scope.go:117] "RemoveContainer" containerID="c699e8ed09c1b1959948a2a83983b2a73362370ad13942383ea39dc8e7bc31d5"
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.876643 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerStarted","Data":"d3bc15f03b5a60585b9be81328f45e115110e8175cc17e6cb13b4fd946d1679f"}
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.916626 4940 scope.go:117] "RemoveContainer" containerID="3ffa118f5f527489dc345765e83342efe059f394a1846bd7fa6ddd85e3436876"
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.938108 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.965786 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.988271 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:15:37 crc kubenswrapper[4940]: E1126 07:15:37.989337 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerName="glance-httpd"
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.989363 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerName="glance-httpd"
Nov 26 07:15:37 crc kubenswrapper[4940]: E1126 07:15:37.989385 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerName="glance-log"
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.989392 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerName="glance-log"
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.989708 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerName="glance-httpd"
Nov 26 07:15:37 crc kubenswrapper[4940]: I1126 07:15:37.989732 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" containerName="glance-log"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.002909 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.003089 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.025969 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.027938 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.139209 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-scripts\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.139320 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-config-data\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.139349 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.139397 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-logs\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.139429 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.139444 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.139461 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.139501 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddp4z\" (UniqueName: \"kubernetes.io/projected/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-kube-api-access-ddp4z\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.240801 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-config-data\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.240847 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.240898 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-logs\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.240931 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.240946 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.240961 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.240999 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddp4z\" (UniqueName: \"kubernetes.io/projected/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-kube-api-access-ddp4z\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.241018 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-scripts\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.242705 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.242933 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-logs\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.243194 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.245694 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.248193 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-config-data\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.249277 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-scripts\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.262553 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.264510 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddp4z\" (UniqueName: \"kubernetes.io/projected/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-kube-api-access-ddp4z\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.275662 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.341852 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.894416 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerStarted","Data":"5bb69d6ce7ba78c126a4d08b2298a833cc02b3a32bb87e85f8648aaf6160041a"}
Nov 26 07:15:38 crc kubenswrapper[4940]: I1126 07:15:38.920485 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:15:38 crc kubenswrapper[4940]: W1126 07:15:38.929943 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod605a7fbf_cd1f_4e44_a85c_0c1a7f1b87cf.slice/crio-5ca16bab69b106e0fb87d8c6ff7e4b523b8810041317e2bcfdd36df790d14ad5 WatchSource:0}: Error finding container 5ca16bab69b106e0fb87d8c6ff7e4b523b8810041317e2bcfdd36df790d14ad5: Status 404 returned error can't find the container with id 5ca16bab69b106e0fb87d8c6ff7e4b523b8810041317e2bcfdd36df790d14ad5
Nov 26 07:15:39 crc kubenswrapper[4940]: I1126 07:15:39.176514 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8682f23a-0622-456d-b9ca-59ffb9fdad24" path="/var/lib/kubelet/pods/8682f23a-0622-456d-b9ca-59ffb9fdad24/volumes"
Nov 26 07:15:39 crc kubenswrapper[4940]: I1126 07:15:39.908847 4940 generic.go:334] "Generic (PLEG): container finished" podID="058d948e-a28a-43f1-91f6-d310742359ef" containerID="51ad24223d851461f35281c479e625834cc22512617bb363aa06d8deb4fd3260" exitCode=0
Nov 26 07:15:39 crc kubenswrapper[4940]: I1126 07:15:39.908933 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"058d948e-a28a-43f1-91f6-d310742359ef","Type":"ContainerDied","Data":"51ad24223d851461f35281c479e625834cc22512617bb363aa06d8deb4fd3260"}
Nov 26 07:15:39 crc kubenswrapper[4940]: I1126 07:15:39.911783 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf","Type":"ContainerStarted","Data":"5d1a5be719119c7cfea6def8095a88a5e3c62d7b6d09bd5c94f113735004fb3f"}
Nov 26 07:15:39 crc kubenswrapper[4940]: I1126 07:15:39.911828 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf","Type":"ContainerStarted","Data":"5ca16bab69b106e0fb87d8c6ff7e4b523b8810041317e2bcfdd36df790d14ad5"}
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.434729 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.497448 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-internal-tls-certs\") pod \"058d948e-a28a-43f1-91f6-d310742359ef\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.497526 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-scripts\") pod \"058d948e-a28a-43f1-91f6-d310742359ef\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.497604 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-httpd-run\") pod \"058d948e-a28a-43f1-91f6-d310742359ef\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.497637 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"058d948e-a28a-43f1-91f6-d310742359ef\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.497758 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zz8sk\" (UniqueName: \"kubernetes.io/projected/058d948e-a28a-43f1-91f6-d310742359ef-kube-api-access-zz8sk\") pod \"058d948e-a28a-43f1-91f6-d310742359ef\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.497843 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-config-data\") pod \"058d948e-a28a-43f1-91f6-d310742359ef\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.498394 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-logs\") pod \"058d948e-a28a-43f1-91f6-d310742359ef\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.498435 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-combined-ca-bundle\") pod \"058d948e-a28a-43f1-91f6-d310742359ef\" (UID: \"058d948e-a28a-43f1-91f6-d310742359ef\") "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.498112 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "058d948e-a28a-43f1-91f6-d310742359ef" (UID: "058d948e-a28a-43f1-91f6-d310742359ef"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.498916 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-logs" (OuterVolumeSpecName: "logs") pod "058d948e-a28a-43f1-91f6-d310742359ef" (UID: "058d948e-a28a-43f1-91f6-d310742359ef"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.499281 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-logs\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.499293 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/058d948e-a28a-43f1-91f6-d310742359ef-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.503351 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-scripts" (OuterVolumeSpecName: "scripts") pod "058d948e-a28a-43f1-91f6-d310742359ef" (UID: "058d948e-a28a-43f1-91f6-d310742359ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.504280 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "058d948e-a28a-43f1-91f6-d310742359ef" (UID: "058d948e-a28a-43f1-91f6-d310742359ef"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.507243 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/058d948e-a28a-43f1-91f6-d310742359ef-kube-api-access-zz8sk" (OuterVolumeSpecName: "kube-api-access-zz8sk") pod "058d948e-a28a-43f1-91f6-d310742359ef" (UID: "058d948e-a28a-43f1-91f6-d310742359ef"). InnerVolumeSpecName "kube-api-access-zz8sk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.567944 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "058d948e-a28a-43f1-91f6-d310742359ef" (UID: "058d948e-a28a-43f1-91f6-d310742359ef"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.587248 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "058d948e-a28a-43f1-91f6-d310742359ef" (UID: "058d948e-a28a-43f1-91f6-d310742359ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.602592 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zz8sk\" (UniqueName: \"kubernetes.io/projected/058d948e-a28a-43f1-91f6-d310742359ef-kube-api-access-zz8sk\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.602627 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.602636 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.602645 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.602675 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" "
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.614189 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-config-data" (OuterVolumeSpecName: "config-data") pod "058d948e-a28a-43f1-91f6-d310742359ef" (UID: "058d948e-a28a-43f1-91f6-d310742359ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.644787 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc"
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.704233 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.704265 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/058d948e-a28a-43f1-91f6-d310742359ef-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.922142 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"058d948e-a28a-43f1-91f6-d310742359ef","Type":"ContainerDied","Data":"db1432ea91ace69890bd709f4b407b488030b20bee761e31c16f169a68c5c37c"}
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.922197 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.922205 4940 scope.go:117] "RemoveContainer" containerID="51ad24223d851461f35281c479e625834cc22512617bb363aa06d8deb4fd3260"
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.926418 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerStarted","Data":"5d600a5c8a1ae58a2c3a705d11d9aa5d2805f6edeab9cc1d8db8af2db70df3cf"}
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.926522 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="ceilometer-central-agent" containerID="cri-o://3b6aed4176b31bda53c9a4ce5995546351eb5863484f4b82c81190dcddab67a8" gracePeriod=30
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.926548 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="sg-core" containerID="cri-o://5bb69d6ce7ba78c126a4d08b2298a833cc02b3a32bb87e85f8648aaf6160041a" gracePeriod=30
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.926587 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="proxy-httpd" containerID="cri-o://5d600a5c8a1ae58a2c3a705d11d9aa5d2805f6edeab9cc1d8db8af2db70df3cf" gracePeriod=30
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.926556 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.926608 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="ceilometer-notification-agent" containerID="cri-o://d3bc15f03b5a60585b9be81328f45e115110e8175cc17e6cb13b4fd946d1679f" gracePeriod=30
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.930708 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf","Type":"ContainerStarted","Data":"1b14a7d61d2e90b0760d7d58727eb7a9fea2999afab2a7de8b185771cfd9eea1"}
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.956456 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.144577266 podStartE2EDuration="6.956414163s" podCreationTimestamp="2025-11-26 07:15:34 +0000 UTC" firstStartedPulling="2025-11-26 07:15:35.980243518 +0000 UTC m=+1237.500385137" lastFinishedPulling="2025-11-26 07:15:39.792080425 +0000 UTC m=+1241.312222034" observedRunningTime="2025-11-26 07:15:40.952936713 +0000 UTC m=+1242.473078332" watchObservedRunningTime="2025-11-26 07:15:40.956414163 +0000 UTC m=+1242.476555792"
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.972535 4940 scope.go:117] "RemoveContainer" containerID="8a1fcf1dd9e392bbbd9e6281fa487af8c1811e6f62a0ab8572b68bcaad1323ba"
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.985063 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:15:40 crc kubenswrapper[4940]: I1126 07:15:40.999113 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.009607 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.009797 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.009775706 podStartE2EDuration="4.009775706s" podCreationTimestamp="2025-11-26 07:15:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:15:40.997218777 +0000 UTC m=+1242.517360396" watchObservedRunningTime="2025-11-26 07:15:41.009775706 +0000 UTC m=+1242.529917325"
Nov 26 07:15:41 crc kubenswrapper[4940]: E1126 07:15:41.010161 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="058d948e-a28a-43f1-91f6-d310742359ef" containerName="glance-log"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.010184 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="058d948e-a28a-43f1-91f6-d310742359ef" containerName="glance-log"
Nov 26 07:15:41 crc kubenswrapper[4940]: E1126 07:15:41.010202 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="058d948e-a28a-43f1-91f6-d310742359ef" containerName="glance-httpd"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.010210 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="058d948e-a28a-43f1-91f6-d310742359ef" containerName="glance-httpd"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.010421 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="058d948e-a28a-43f1-91f6-d310742359ef" containerName="glance-httpd"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.010463 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="058d948e-a28a-43f1-91f6-d310742359ef" containerName="glance-log"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.011668 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.018534 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.018886 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.032206 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.190894 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="058d948e-a28a-43f1-91f6-d310742359ef" path="/var/lib/kubelet/pods/058d948e-a28a-43f1-91f6-d310742359ef/volumes"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.216379 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-logs\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.216451 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.216549 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.216601 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.216722 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.216784 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwj7t\" (UniqueName: \"kubernetes.io/projected/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-kube-api-access-xwj7t\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.216820 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.216872 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319085 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319150 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319173 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-logs\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319227 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319252 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319275 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319340 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319372 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwj7t\" (UniqueName: \"kubernetes.io/projected/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-kube-api-access-xwj7t\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0"
pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319630 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.319708 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-logs\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.320672 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.323966 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.324017 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.324480 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.324931 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.339994 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwj7t\" (UniqueName: \"kubernetes.io/projected/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-kube-api-access-xwj7t\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.355948 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.376274 4940 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.946386 4940 generic.go:334] "Generic (PLEG): container finished" podID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerID="5d600a5c8a1ae58a2c3a705d11d9aa5d2805f6edeab9cc1d8db8af2db70df3cf" exitCode=0 Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.946424 4940 generic.go:334] "Generic (PLEG): container finished" podID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerID="5bb69d6ce7ba78c126a4d08b2298a833cc02b3a32bb87e85f8648aaf6160041a" exitCode=2 Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.946437 4940 generic.go:334] "Generic (PLEG): container finished" podID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerID="d3bc15f03b5a60585b9be81328f45e115110e8175cc17e6cb13b4fd946d1679f" exitCode=0 Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.946498 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerDied","Data":"5d600a5c8a1ae58a2c3a705d11d9aa5d2805f6edeab9cc1d8db8af2db70df3cf"} Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.946526 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerDied","Data":"5bb69d6ce7ba78c126a4d08b2298a833cc02b3a32bb87e85f8648aaf6160041a"} Nov 26 07:15:41 crc kubenswrapper[4940]: I1126 07:15:41.946539 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerDied","Data":"d3bc15f03b5a60585b9be81328f45e115110e8175cc17e6cb13b4fd946d1679f"} Nov 26 07:15:44 crc kubenswrapper[4940]: I1126 07:15:44.992606 4940 generic.go:334] "Generic (PLEG): container finished" podID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerID="3b6aed4176b31bda53c9a4ce5995546351eb5863484f4b82c81190dcddab67a8" exitCode=0 Nov 26 07:15:44 crc kubenswrapper[4940]: I1126 07:15:44.992660 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerDied","Data":"3b6aed4176b31bda53c9a4ce5995546351eb5863484f4b82c81190dcddab67a8"} Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.194607 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.391873 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-ceilometer-tls-certs\") pod \"488524db-1b89-4a01-b162-4c7fcd2aac3b\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.391962 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-scripts\") pod \"488524db-1b89-4a01-b162-4c7fcd2aac3b\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.391995 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-log-httpd\") pod \"488524db-1b89-4a01-b162-4c7fcd2aac3b\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.392102 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8pnt\" (UniqueName: \"kubernetes.io/projected/488524db-1b89-4a01-b162-4c7fcd2aac3b-kube-api-access-p8pnt\") pod \"488524db-1b89-4a01-b162-4c7fcd2aac3b\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.392260 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-sg-core-conf-yaml\") pod \"488524db-1b89-4a01-b162-4c7fcd2aac3b\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.392330 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-run-httpd\") pod \"488524db-1b89-4a01-b162-4c7fcd2aac3b\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.392377 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-config-data\") pod \"488524db-1b89-4a01-b162-4c7fcd2aac3b\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.392431 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-combined-ca-bundle\") pod \"488524db-1b89-4a01-b162-4c7fcd2aac3b\" (UID: \"488524db-1b89-4a01-b162-4c7fcd2aac3b\") " Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.392571 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "488524db-1b89-4a01-b162-4c7fcd2aac3b" (UID: "488524db-1b89-4a01-b162-4c7fcd2aac3b"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.392668 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "488524db-1b89-4a01-b162-4c7fcd2aac3b" (UID: "488524db-1b89-4a01-b162-4c7fcd2aac3b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.393263 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.393501 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/488524db-1b89-4a01-b162-4c7fcd2aac3b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.396582 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/488524db-1b89-4a01-b162-4c7fcd2aac3b-kube-api-access-p8pnt" (OuterVolumeSpecName: "kube-api-access-p8pnt") pod "488524db-1b89-4a01-b162-4c7fcd2aac3b" (UID: "488524db-1b89-4a01-b162-4c7fcd2aac3b"). InnerVolumeSpecName "kube-api-access-p8pnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.397753 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-scripts" (OuterVolumeSpecName: "scripts") pod "488524db-1b89-4a01-b162-4c7fcd2aac3b" (UID: "488524db-1b89-4a01-b162-4c7fcd2aac3b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.419726 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "488524db-1b89-4a01-b162-4c7fcd2aac3b" (UID: "488524db-1b89-4a01-b162-4c7fcd2aac3b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.442181 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "488524db-1b89-4a01-b162-4c7fcd2aac3b" (UID: "488524db-1b89-4a01-b162-4c7fcd2aac3b"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.473000 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "488524db-1b89-4a01-b162-4c7fcd2aac3b" (UID: "488524db-1b89-4a01-b162-4c7fcd2aac3b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.483399 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-config-data" (OuterVolumeSpecName: "config-data") pod "488524db-1b89-4a01-b162-4c7fcd2aac3b" (UID: "488524db-1b89-4a01-b162-4c7fcd2aac3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.495828 4940 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.495892 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.495904 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8pnt\" (UniqueName: \"kubernetes.io/projected/488524db-1b89-4a01-b162-4c7fcd2aac3b-kube-api-access-p8pnt\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.495914 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.495923 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.495932 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/488524db-1b89-4a01-b162-4c7fcd2aac3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:45 crc kubenswrapper[4940]: I1126 07:15:45.625245 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:15:45 crc kubenswrapper[4940]: W1126 07:15:45.633122 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79fc2ca6_8d13_44e6_83da_033c7f2d7df3.slice/crio-eddd25e5f77c7035bd5cc3a39e05cc12ff82a824b564d897201f5481a00bb890 WatchSource:0}: Error finding container eddd25e5f77c7035bd5cc3a39e05cc12ff82a824b564d897201f5481a00bb890: Status 404 returned error can't find the container with id eddd25e5f77c7035bd5cc3a39e05cc12ff82a824b564d897201f5481a00bb890 Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.006342 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"488524db-1b89-4a01-b162-4c7fcd2aac3b","Type":"ContainerDied","Data":"83ff2f8747059d1ff5febe0b5aac7bffe28377929bfc3ee82c5dce6bcf4e9998"} Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.006423 4940 scope.go:117] "RemoveContainer" containerID="5d600a5c8a1ae58a2c3a705d11d9aa5d2805f6edeab9cc1d8db8af2db70df3cf" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.006592 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.012354 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-bf4g9" event={"ID":"b4735f75-b279-4137-9496-36fd315fa8e8","Type":"ContainerStarted","Data":"c4639981f7e85fc58d57349ae26c0c6d76318e4de9c80c781725c8673d4d82b5"} Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.014061 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"79fc2ca6-8d13-44e6-83da-033c7f2d7df3","Type":"ContainerStarted","Data":"eddd25e5f77c7035bd5cc3a39e05cc12ff82a824b564d897201f5481a00bb890"} Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.037955 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-bf4g9" podStartSLOduration=1.9254691240000001 podStartE2EDuration="10.037936259s" podCreationTimestamp="2025-11-26 07:15:36 +0000 UTC" firstStartedPulling="2025-11-26 07:15:37.016557396 +0000 UTC m=+1238.536699015" lastFinishedPulling="2025-11-26 07:15:45.129024531 +0000 UTC m=+1246.649166150" observedRunningTime="2025-11-26 07:15:46.032223667 +0000 UTC m=+1247.552365286" watchObservedRunningTime="2025-11-26 07:15:46.037936259 +0000 UTC m=+1247.558077878" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.038008 4940 scope.go:117] "RemoveContainer" containerID="5bb69d6ce7ba78c126a4d08b2298a833cc02b3a32bb87e85f8648aaf6160041a" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.053845 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.065067 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.083685 4940 scope.go:117] "RemoveContainer" containerID="d3bc15f03b5a60585b9be81328f45e115110e8175cc17e6cb13b4fd946d1679f" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.083828 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:46 crc kubenswrapper[4940]: E1126 07:15:46.084338 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="ceilometer-central-agent" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.084361 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="ceilometer-central-agent" Nov 26 07:15:46 crc kubenswrapper[4940]: E1126 07:15:46.084378 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="sg-core" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.084405 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="sg-core" Nov 26 07:15:46 crc kubenswrapper[4940]: E1126 07:15:46.084420 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="ceilometer-notification-agent" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.084430 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="ceilometer-notification-agent" Nov 26 07:15:46 crc kubenswrapper[4940]: E1126 07:15:46.084445 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="proxy-httpd" 
Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.084454 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="proxy-httpd" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.084676 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="sg-core" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.084691 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="ceilometer-central-agent" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.084700 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="ceilometer-notification-agent" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.084720 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" containerName="proxy-httpd" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.087138 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.089532 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.089568 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.093642 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.116431 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.143860 4940 scope.go:117] "RemoveContainer" containerID="3b6aed4176b31bda53c9a4ce5995546351eb5863484f4b82c81190dcddab67a8" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.209293 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.209369 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.209413 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-scripts\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.209495 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-config-data\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 
07:15:46.209559 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.209603 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-run-httpd\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.209650 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-log-httpd\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.209788 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6wn8\" (UniqueName: \"kubernetes.io/projected/b356de7e-d5db-470d-863d-21a4dae15a4c-kube-api-access-j6wn8\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.311824 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.311900 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-run-httpd\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.311959 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-log-httpd\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.312518 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6wn8\" (UniqueName: \"kubernetes.io/projected/b356de7e-d5db-470d-863d-21a4dae15a4c-kube-api-access-j6wn8\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.312597 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.312639 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.312678 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-scripts\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.312749 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-config-data\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.314293 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-log-httpd\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.315509 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-run-httpd\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.318575 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-config-data\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.325298 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.331433 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-scripts\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.331575 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.332694 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.334063 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6wn8\" (UniqueName: \"kubernetes.io/projected/b356de7e-d5db-470d-863d-21a4dae15a4c-kube-api-access-j6wn8\") pod \"ceilometer-0\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " pod="openstack/ceilometer-0" Nov 26 07:15:46 
crc kubenswrapper[4940]: I1126 07:15:46.427764 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:15:46 crc kubenswrapper[4940]: I1126 07:15:46.877995 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:15:47 crc kubenswrapper[4940]: I1126 07:15:47.059450 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"79fc2ca6-8d13-44e6-83da-033c7f2d7df3","Type":"ContainerStarted","Data":"0ffb3bf39d8a1f37181283238b716fcbcfefeaecec4ce53764db5c4edb369cee"} Nov 26 07:15:47 crc kubenswrapper[4940]: I1126 07:15:47.059504 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"79fc2ca6-8d13-44e6-83da-033c7f2d7df3","Type":"ContainerStarted","Data":"8233c7cbc4acce8bdbeebbef4a2bf8d2190310e50e6de48733f430fc0c6cf042"} Nov 26 07:15:47 crc kubenswrapper[4940]: I1126 07:15:47.063910 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerStarted","Data":"44886a4c4429c3d8471d252a5a606731a204580ecc3fd957428bbb49a4472606"} Nov 26 07:15:47 crc kubenswrapper[4940]: I1126 07:15:47.135603 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.135579891 podStartE2EDuration="7.135579891s" podCreationTimestamp="2025-11-26 07:15:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:15:47.107922273 +0000 UTC m=+1248.628063902" watchObservedRunningTime="2025-11-26 07:15:47.135579891 +0000 UTC m=+1248.655721510" Nov 26 07:15:47 crc kubenswrapper[4940]: I1126 07:15:47.176737 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="488524db-1b89-4a01-b162-4c7fcd2aac3b" path="/var/lib/kubelet/pods/488524db-1b89-4a01-b162-4c7fcd2aac3b/volumes" Nov 26 07:15:48 crc kubenswrapper[4940]: I1126 07:15:48.093226 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerStarted","Data":"33b1f8b85b8ab7ad46ce6f250c0fbfd265e172b17ae91c9d1e8b3193786e3da4"} Nov 26 07:15:48 crc kubenswrapper[4940]: I1126 07:15:48.342580 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 07:15:48 crc kubenswrapper[4940]: I1126 07:15:48.342631 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 07:15:48 crc kubenswrapper[4940]: I1126 07:15:48.387160 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 07:15:48 crc kubenswrapper[4940]: I1126 07:15:48.406599 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 07:15:49 crc kubenswrapper[4940]: I1126 07:15:49.110080 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerStarted","Data":"e2b67954ae2a8283b16c7c53487cd1aaef4a4c26e1d91c7d7291bd5a523b56c8"} Nov 26 07:15:49 crc kubenswrapper[4940]: I1126 07:15:49.110379 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-external-api-0" Nov 26 07:15:49 crc kubenswrapper[4940]: I1126 07:15:49.110574 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 07:15:50 crc kubenswrapper[4940]: I1126 07:15:50.124940 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerStarted","Data":"8ab10c09df33d5a580ad2893f4497a7ba5796e1c50928856e44602f3b20c6568"} Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.137379 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.137635 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.137445 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerStarted","Data":"437d7372e0e4cf0691257be8373b33c9a94ddf4bba17e1269e87c1dcf7257e08"} Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.137785 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.169822 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8727569609999999 podStartE2EDuration="5.16980642s" podCreationTimestamp="2025-11-26 07:15:46 +0000 UTC" firstStartedPulling="2025-11-26 07:15:46.87984298 +0000 UTC m=+1248.399984599" lastFinishedPulling="2025-11-26 07:15:50.176892429 +0000 UTC m=+1251.697034058" observedRunningTime="2025-11-26 07:15:51.161397723 +0000 UTC m=+1252.681539362" watchObservedRunningTime="2025-11-26 07:15:51.16980642 +0000 UTC m=+1252.689948039" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.208865 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.211288 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.376982 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.377024 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.408380 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:51 crc kubenswrapper[4940]: I1126 07:15:51.437411 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:52 crc kubenswrapper[4940]: I1126 07:15:52.146294 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:52 crc kubenswrapper[4940]: I1126 07:15:52.146882 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:54 crc kubenswrapper[4940]: I1126 07:15:54.106286 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:54 crc 
kubenswrapper[4940]: I1126 07:15:54.110641 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 07:15:57 crc kubenswrapper[4940]: I1126 07:15:57.213137 4940 generic.go:334] "Generic (PLEG): container finished" podID="b4735f75-b279-4137-9496-36fd315fa8e8" containerID="c4639981f7e85fc58d57349ae26c0c6d76318e4de9c80c781725c8673d4d82b5" exitCode=0 Nov 26 07:15:57 crc kubenswrapper[4940]: I1126 07:15:57.213251 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-bf4g9" event={"ID":"b4735f75-b279-4137-9496-36fd315fa8e8","Type":"ContainerDied","Data":"c4639981f7e85fc58d57349ae26c0c6d76318e4de9c80c781725c8673d4d82b5"} Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.555666 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-bf4g9" Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.582178 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-config-data\") pod \"b4735f75-b279-4137-9496-36fd315fa8e8\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.582276 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-combined-ca-bundle\") pod \"b4735f75-b279-4137-9496-36fd315fa8e8\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.582307 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwzws\" (UniqueName: \"kubernetes.io/projected/b4735f75-b279-4137-9496-36fd315fa8e8-kube-api-access-lwzws\") pod \"b4735f75-b279-4137-9496-36fd315fa8e8\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.582361 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-scripts\") pod \"b4735f75-b279-4137-9496-36fd315fa8e8\" (UID: \"b4735f75-b279-4137-9496-36fd315fa8e8\") " Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.589254 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4735f75-b279-4137-9496-36fd315fa8e8-kube-api-access-lwzws" (OuterVolumeSpecName: "kube-api-access-lwzws") pod "b4735f75-b279-4137-9496-36fd315fa8e8" (UID: "b4735f75-b279-4137-9496-36fd315fa8e8"). InnerVolumeSpecName "kube-api-access-lwzws". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.589354 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-scripts" (OuterVolumeSpecName: "scripts") pod "b4735f75-b279-4137-9496-36fd315fa8e8" (UID: "b4735f75-b279-4137-9496-36fd315fa8e8"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.607396 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4735f75-b279-4137-9496-36fd315fa8e8" (UID: "b4735f75-b279-4137-9496-36fd315fa8e8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.612266 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-config-data" (OuterVolumeSpecName: "config-data") pod "b4735f75-b279-4137-9496-36fd315fa8e8" (UID: "b4735f75-b279-4137-9496-36fd315fa8e8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.683783 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.683812 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.683824 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwzws\" (UniqueName: \"kubernetes.io/projected/b4735f75-b279-4137-9496-36fd315fa8e8-kube-api-access-lwzws\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:58 crc kubenswrapper[4940]: I1126 07:15:58.683833 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4735f75-b279-4137-9496-36fd315fa8e8-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.233385 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-bf4g9" event={"ID":"b4735f75-b279-4137-9496-36fd315fa8e8","Type":"ContainerDied","Data":"7ee4feb68bdc5c33eee39bca202cf71c97f342e4900163b57e17f573d49616ef"} Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.233652 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ee4feb68bdc5c33eee39bca202cf71c97f342e4900163b57e17f573d49616ef" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.233423 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-bf4g9" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.336858 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 07:15:59 crc kubenswrapper[4940]: E1126 07:15:59.337340 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4735f75-b279-4137-9496-36fd315fa8e8" containerName="nova-cell0-conductor-db-sync" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.337362 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4735f75-b279-4137-9496-36fd315fa8e8" containerName="nova-cell0-conductor-db-sync" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.337585 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4735f75-b279-4137-9496-36fd315fa8e8" containerName="nova-cell0-conductor-db-sync" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.338256 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.341145 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-xx9q7" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.342248 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.349831 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.394792 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.394974 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skbp5\" (UniqueName: \"kubernetes.io/projected/f93321ef-2519-4bc0-b3d1-a45194267ca6-kube-api-access-skbp5\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.395311 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.496584 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.496633 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 
07:15:59.496683 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skbp5\" (UniqueName: \"kubernetes.io/projected/f93321ef-2519-4bc0-b3d1-a45194267ca6-kube-api-access-skbp5\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.507184 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.507195 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.512338 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skbp5\" (UniqueName: \"kubernetes.io/projected/f93321ef-2519-4bc0-b3d1-a45194267ca6-kube-api-access-skbp5\") pod \"nova-cell0-conductor-0\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:15:59 crc kubenswrapper[4940]: I1126 07:15:59.654294 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 07:16:00 crc kubenswrapper[4940]: I1126 07:16:00.114906 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 07:16:00 crc kubenswrapper[4940]: I1126 07:16:00.241735 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f93321ef-2519-4bc0-b3d1-a45194267ca6","Type":"ContainerStarted","Data":"317a1977bbab7b14a5ff1bcf543ad9858d69cbfe1d93caa4df99a433df6079af"} Nov 26 07:16:01 crc kubenswrapper[4940]: I1126 07:16:01.252560 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f93321ef-2519-4bc0-b3d1-a45194267ca6","Type":"ContainerStarted","Data":"e146e6ba7d8a0d8f7e78795f75427c31b1e5c239738564750f702ee38a6b5f61"} Nov 26 07:16:01 crc kubenswrapper[4940]: I1126 07:16:01.252913 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 26 07:16:01 crc kubenswrapper[4940]: I1126 07:16:01.277541 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.277520706 podStartE2EDuration="2.277520706s" podCreationTimestamp="2025-11-26 07:15:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:01.267411556 +0000 UTC m=+1262.787553175" watchObservedRunningTime="2025-11-26 07:16:01.277520706 +0000 UTC m=+1262.797662345" Nov 26 07:16:09 crc kubenswrapper[4940]: I1126 07:16:09.677803 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.113548 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-j86bw"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.115040 
4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.118233 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.122306 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.122989 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-j86bw"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.209469 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-config-data\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.209563 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.209601 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-scripts\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.209668 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2bvd\" (UniqueName: \"kubernetes.io/projected/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-kube-api-access-t2bvd\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.255266 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.257182 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.262817 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.275754 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.312556 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.312626 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-scripts\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.312726 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2bvd\" (UniqueName: \"kubernetes.io/projected/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-kube-api-access-t2bvd\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.312784 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-config-data\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.327901 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-scripts\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.330359 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.331789 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.334238 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-config-data\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.337431 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.339866 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.345600 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2bvd\" (UniqueName: \"kubernetes.io/projected/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-kube-api-access-t2bvd\") pod \"nova-cell0-cell-mapping-j86bw\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.358256 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.416230 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-logs\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.416297 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp9rf\" (UniqueName: \"kubernetes.io/projected/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-kube-api-access-cp9rf\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.416334 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.416587 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-config-data\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.447716 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.465538 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.467012 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.470386 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.489497 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.520328 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.520683 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlmgs\" (UniqueName: \"kubernetes.io/projected/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-kube-api-access-zlmgs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.520849 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-config-data\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.520904 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-logs\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.520943 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp9rf\" (UniqueName: \"kubernetes.io/projected/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-kube-api-access-cp9rf\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.520969 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.521014 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.522100 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-logs\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.528488 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-config-data\") 
pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.529270 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.559693 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp9rf\" (UniqueName: \"kubernetes.io/projected/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-kube-api-access-cp9rf\") pod \"nova-api-0\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.582496 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.584477 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.601556 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.602053 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.614504 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.634765 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.634935 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k89rf\" (UniqueName: \"kubernetes.io/projected/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-kube-api-access-k89rf\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.634965 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.634993 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.635089 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlmgs\" (UniqueName: \"kubernetes.io/projected/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-kube-api-access-zlmgs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 
07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.635142 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-config-data\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.645794 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.663074 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.675224 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlmgs\" (UniqueName: \"kubernetes.io/projected/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-kube-api-access-zlmgs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.677754 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5dd7c4987f-nvvnw"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.679324 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.687895 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dd7c4987f-nvvnw"] Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.736893 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.740303 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.741896 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.741926 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k89rf\" (UniqueName: \"kubernetes.io/projected/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-kube-api-access-k89rf\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.742093 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-config-data\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.742175 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b6g9\" (UniqueName: \"kubernetes.io/projected/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-kube-api-access-8b6g9\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.742284 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-config-data\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.742311 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-logs\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.746242 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.746776 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-config-data\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.777583 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k89rf\" (UniqueName: \"kubernetes.io/projected/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-kube-api-access-k89rf\") pod \"nova-scheduler-0\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.844846 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-sb\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.844895 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b6g9\" (UniqueName: \"kubernetes.io/projected/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-kube-api-access-8b6g9\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.844964 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-config-data\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.844986 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svz7p\" (UniqueName: \"kubernetes.io/projected/455e0122-ecd7-4a3a-97be-fb47b8910025-kube-api-access-svz7p\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.845002 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-logs\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.845015 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-nb\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.845080 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-config\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.845108 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-svc\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.845149 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.845285 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-swift-storage-0\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.846106 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-logs\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.850698 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.851620 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-config-data\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.862656 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b6g9\" (UniqueName: \"kubernetes.io/projected/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-kube-api-access-8b6g9\") pod \"nova-metadata-0\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " pod="openstack/nova-metadata-0" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.947003 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-swift-storage-0\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.947163 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-sb\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.947258 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svz7p\" (UniqueName: \"kubernetes.io/projected/455e0122-ecd7-4a3a-97be-fb47b8910025-kube-api-access-svz7p\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.947284 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-nb\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.947370 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-config\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " 
pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.947430 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-svc\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.948275 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-sb\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.948330 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-svc\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.948330 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-nb\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.948967 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-config\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.949726 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-swift-storage-0\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.969902 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svz7p\" (UniqueName: \"kubernetes.io/projected/455e0122-ecd7-4a3a-97be-fb47b8910025-kube-api-access-svz7p\") pod \"dnsmasq-dns-5dd7c4987f-nvvnw\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:10 crc kubenswrapper[4940]: I1126 07:16:10.996457 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.015696 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.036616 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.134987 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-j86bw"] Nov 26 07:16:11 crc kubenswrapper[4940]: W1126 07:16:11.145450 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f14cc1f_7e2f_441a_9e2b_b8aeedbb26ec.slice/crio-c6fce9afb1a0a60004481148b4dbee51a318e9a83dce290394876aa2b9c4f3b6 WatchSource:0}: Error finding container c6fce9afb1a0a60004481148b4dbee51a318e9a83dce290394876aa2b9c4f3b6: Status 404 returned error can't find the container with id c6fce9afb1a0a60004481148b4dbee51a318e9a83dce290394876aa2b9c4f3b6 Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.147623 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z542t"] Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.149344 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.150509 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-scripts\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.150581 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-config-data\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.150612 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlvvd\" (UniqueName: \"kubernetes.io/projected/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-kube-api-access-dlvvd\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.150665 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.152559 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.153476 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.202431 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z542t"] Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.252287 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-scripts\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.252357 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-config-data\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.252380 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlvvd\" (UniqueName: \"kubernetes.io/projected/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-kube-api-access-dlvvd\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.252436 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.258441 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.260721 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-config-data\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.260793 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.263030 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-scripts\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.274824 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlvvd\" (UniqueName: \"kubernetes.io/projected/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-kube-api-access-dlvvd\") pod \"nova-cell1-conductor-db-sync-z542t\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.349900 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.380649 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"5a19a332-3a3d-4c32-89ee-efebcf6b09c5","Type":"ContainerStarted","Data":"c3d7825e7018d5c33f8bec76d82c4a55f379179c090e6368839ac55cd2b2bc4a"} Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.391816 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j86bw" event={"ID":"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec","Type":"ContainerStarted","Data":"c6fce9afb1a0a60004481148b4dbee51a318e9a83dce290394876aa2b9c4f3b6"} Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.395925 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ba46c5dd-ffdd-46e9-9104-c1d48aa61427","Type":"ContainerStarted","Data":"00cec1c69d474ea32ca60cd2b8eec58c83a15b03bc436aaa01fa5a9e66c5848d"} Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.480023 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.541541 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:11 crc kubenswrapper[4940]: W1126 07:16:11.549601 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaebd0349_6a1d_4bc3_a74f_94907dab5a4e.slice/crio-a78f3e55b926f533535dee0934a60f23fa27663753eabe67a70c17278f968963 WatchSource:0}: Error finding container a78f3e55b926f533535dee0934a60f23fa27663753eabe67a70c17278f968963: Status 404 returned error can't find the container with id a78f3e55b926f533535dee0934a60f23fa27663753eabe67a70c17278f968963 Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.774109 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dd7c4987f-nvvnw"] Nov 26 07:16:11 crc kubenswrapper[4940]: I1126 07:16:11.797331 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.148741 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z542t"] Nov 26 07:16:12 crc kubenswrapper[4940]: W1126 07:16:12.150641 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94dc0ebf_17e6_4c5a_9263_0fe98d7cc80a.slice/crio-98183ab9152702501a8c81deda3e8689a42ded7bbdc5f1fd974ecfc14027c95a WatchSource:0}: Error finding container 98183ab9152702501a8c81deda3e8689a42ded7bbdc5f1fd974ecfc14027c95a: Status 404 returned error can't find the container with id 98183ab9152702501a8c81deda3e8689a42ded7bbdc5f1fd974ecfc14027c95a Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.415491 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j86bw" event={"ID":"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec","Type":"ContainerStarted","Data":"8d83826e05c1cc03071fc239f8b2b62ced78d755990da036c83dd3d12b2e4231"} Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.417668 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-z542t" event={"ID":"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a","Type":"ContainerStarted","Data":"98183ab9152702501a8c81deda3e8689a42ded7bbdc5f1fd974ecfc14027c95a"} Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.421907 4940 generic.go:334] "Generic (PLEG): container finished" podID="455e0122-ecd7-4a3a-97be-fb47b8910025" 
containerID="72e7a0c82ce7a29c8be03ae3e4dc6deec9e621f14440cc43e9a2dd27ad33bac0" exitCode=0 Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.421979 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" event={"ID":"455e0122-ecd7-4a3a-97be-fb47b8910025","Type":"ContainerDied","Data":"72e7a0c82ce7a29c8be03ae3e4dc6deec9e621f14440cc43e9a2dd27ad33bac0"} Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.422005 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" event={"ID":"455e0122-ecd7-4a3a-97be-fb47b8910025","Type":"ContainerStarted","Data":"9c9fc233627c9214c079faa6a46361b6a620342a1710d9ea80eae869af0f4028"} Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.423315 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"aebd0349-6a1d-4bc3-a74f-94907dab5a4e","Type":"ContainerStarted","Data":"a78f3e55b926f533535dee0934a60f23fa27663753eabe67a70c17278f968963"} Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.424677 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8ce9e8c2-9b63-47fe-b8b2-8806638b369e","Type":"ContainerStarted","Data":"825d7ff7e6c42abc8c9cafab6a50448c3fe2b170c2840663700b56581f201b92"} Nov 26 07:16:12 crc kubenswrapper[4940]: I1126 07:16:12.439012 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-j86bw" podStartSLOduration=2.438993883 podStartE2EDuration="2.438993883s" podCreationTimestamp="2025-11-26 07:16:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:12.431540837 +0000 UTC m=+1273.951682456" watchObservedRunningTime="2025-11-26 07:16:12.438993883 +0000 UTC m=+1273.959135492" Nov 26 07:16:13 crc kubenswrapper[4940]: I1126 07:16:13.438762 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-z542t" event={"ID":"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a","Type":"ContainerStarted","Data":"df777f9d08415a40f6552e65002da2faf05b83dc3817620a46e85b24ddaab1a1"} Nov 26 07:16:13 crc kubenswrapper[4940]: I1126 07:16:13.442309 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" event={"ID":"455e0122-ecd7-4a3a-97be-fb47b8910025","Type":"ContainerStarted","Data":"e0c636b12aa08795b31b27a1d917b15d8b732238083a8564a42737a92c658225"} Nov 26 07:16:13 crc kubenswrapper[4940]: I1126 07:16:13.459507 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-z542t" podStartSLOduration=2.45948772 podStartE2EDuration="2.45948772s" podCreationTimestamp="2025-11-26 07:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:13.456945579 +0000 UTC m=+1274.977087208" watchObservedRunningTime="2025-11-26 07:16:13.45948772 +0000 UTC m=+1274.979629339" Nov 26 07:16:13 crc kubenswrapper[4940]: I1126 07:16:13.487004 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" podStartSLOduration=3.486967641 podStartE2EDuration="3.486967641s" podCreationTimestamp="2025-11-26 07:16:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 
07:16:13.47403457 +0000 UTC m=+1274.994176199" watchObservedRunningTime="2025-11-26 07:16:13.486967641 +0000 UTC m=+1275.007109260" Nov 26 07:16:14 crc kubenswrapper[4940]: I1126 07:16:14.451688 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:14 crc kubenswrapper[4940]: I1126 07:16:14.454926 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:14 crc kubenswrapper[4940]: I1126 07:16:14.512611 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.461829 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"aebd0349-6a1d-4bc3-a74f-94907dab5a4e","Type":"ContainerStarted","Data":"dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c"} Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.463727 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8ce9e8c2-9b63-47fe-b8b2-8806638b369e","Type":"ContainerStarted","Data":"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a"} Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.463765 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8ce9e8c2-9b63-47fe-b8b2-8806638b369e","Type":"ContainerStarted","Data":"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f"} Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.464071 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerName="nova-metadata-log" containerID="cri-o://a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f" gracePeriod=30 Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.464095 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerName="nova-metadata-metadata" containerID="cri-o://359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a" gracePeriod=30 Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.465283 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ba46c5dd-ffdd-46e9-9104-c1d48aa61427","Type":"ContainerStarted","Data":"a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c"} Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.465394 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="ba46c5dd-ffdd-46e9-9104-c1d48aa61427" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c" gracePeriod=30 Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.479577 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5a19a332-3a3d-4c32-89ee-efebcf6b09c5","Type":"ContainerStarted","Data":"16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc"} Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.479630 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5a19a332-3a3d-4c32-89ee-efebcf6b09c5","Type":"ContainerStarted","Data":"afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6"} Nov 26 07:16:15 crc kubenswrapper[4940]: 
I1126 07:16:15.491031 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.834835478 podStartE2EDuration="5.491007191s" podCreationTimestamp="2025-11-26 07:16:10 +0000 UTC" firstStartedPulling="2025-11-26 07:16:11.558356373 +0000 UTC m=+1273.078497992" lastFinishedPulling="2025-11-26 07:16:14.214528086 +0000 UTC m=+1275.734669705" observedRunningTime="2025-11-26 07:16:15.484785373 +0000 UTC m=+1277.004927012" watchObservedRunningTime="2025-11-26 07:16:15.491007191 +0000 UTC m=+1277.011148820" Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.514523 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.64320051 podStartE2EDuration="5.514502956s" podCreationTimestamp="2025-11-26 07:16:10 +0000 UTC" firstStartedPulling="2025-11-26 07:16:11.340164803 +0000 UTC m=+1272.860306422" lastFinishedPulling="2025-11-26 07:16:14.211467249 +0000 UTC m=+1275.731608868" observedRunningTime="2025-11-26 07:16:15.508664571 +0000 UTC m=+1277.028806210" watchObservedRunningTime="2025-11-26 07:16:15.514502956 +0000 UTC m=+1277.034644575" Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.528352 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.200802465 podStartE2EDuration="5.528338095s" podCreationTimestamp="2025-11-26 07:16:10 +0000 UTC" firstStartedPulling="2025-11-26 07:16:11.884624311 +0000 UTC m=+1273.404765930" lastFinishedPulling="2025-11-26 07:16:14.212159941 +0000 UTC m=+1275.732301560" observedRunningTime="2025-11-26 07:16:15.527599051 +0000 UTC m=+1277.047740680" watchObservedRunningTime="2025-11-26 07:16:15.528338095 +0000 UTC m=+1277.048479714" Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.546928 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.59780564 podStartE2EDuration="5.546911454s" podCreationTimestamp="2025-11-26 07:16:10 +0000 UTC" firstStartedPulling="2025-11-26 07:16:11.263116129 +0000 UTC m=+1272.783257748" lastFinishedPulling="2025-11-26 07:16:14.212221943 +0000 UTC m=+1275.732363562" observedRunningTime="2025-11-26 07:16:15.545828569 +0000 UTC m=+1277.065970188" watchObservedRunningTime="2025-11-26 07:16:15.546911454 +0000 UTC m=+1277.067053073" Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.742544 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:15 crc kubenswrapper[4940]: I1126 07:16:15.997229 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.016611 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.016653 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.080088 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.184035 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-config-data\") pod \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.184385 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-combined-ca-bundle\") pod \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.184419 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-logs\") pod \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.184495 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b6g9\" (UniqueName: \"kubernetes.io/projected/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-kube-api-access-8b6g9\") pod \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\" (UID: \"8ce9e8c2-9b63-47fe-b8b2-8806638b369e\") " Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.185167 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-logs" (OuterVolumeSpecName: "logs") pod "8ce9e8c2-9b63-47fe-b8b2-8806638b369e" (UID: "8ce9e8c2-9b63-47fe-b8b2-8806638b369e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.193280 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-kube-api-access-8b6g9" (OuterVolumeSpecName: "kube-api-access-8b6g9") pod "8ce9e8c2-9b63-47fe-b8b2-8806638b369e" (UID: "8ce9e8c2-9b63-47fe-b8b2-8806638b369e"). InnerVolumeSpecName "kube-api-access-8b6g9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.213808 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-config-data" (OuterVolumeSpecName: "config-data") pod "8ce9e8c2-9b63-47fe-b8b2-8806638b369e" (UID: "8ce9e8c2-9b63-47fe-b8b2-8806638b369e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.214661 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ce9e8c2-9b63-47fe-b8b2-8806638b369e" (UID: "8ce9e8c2-9b63-47fe-b8b2-8806638b369e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.287714 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.287748 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.287761 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.287771 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b6g9\" (UniqueName: \"kubernetes.io/projected/8ce9e8c2-9b63-47fe-b8b2-8806638b369e-kube-api-access-8b6g9\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.439721 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.495753 4940 generic.go:334] "Generic (PLEG): container finished" podID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerID="359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a" exitCode=0 Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.495789 4940 generic.go:334] "Generic (PLEG): container finished" podID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerID="a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f" exitCode=143 Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.495850 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8ce9e8c2-9b63-47fe-b8b2-8806638b369e","Type":"ContainerDied","Data":"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a"} Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.495917 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8ce9e8c2-9b63-47fe-b8b2-8806638b369e","Type":"ContainerDied","Data":"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f"} Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.495933 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8ce9e8c2-9b63-47fe-b8b2-8806638b369e","Type":"ContainerDied","Data":"825d7ff7e6c42abc8c9cafab6a50448c3fe2b170c2840663700b56581f201b92"} Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.495942 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.495951 4940 scope.go:117] "RemoveContainer" containerID="359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.531305 4940 scope.go:117] "RemoveContainer" containerID="a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.553973 4940 scope.go:117] "RemoveContainer" containerID="359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a" Nov 26 07:16:16 crc kubenswrapper[4940]: E1126 07:16:16.554934 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a\": container with ID starting with 359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a not found: ID does not exist" containerID="359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.554983 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a"} err="failed to get container status \"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a\": rpc error: code = NotFound desc = could not find container \"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a\": container with ID starting with 359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a not found: ID does not exist" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.555015 4940 scope.go:117] "RemoveContainer" containerID="a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.555434 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:16 crc kubenswrapper[4940]: E1126 07:16:16.558094 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f\": container with ID starting with a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f not found: ID does not exist" containerID="a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.558287 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f"} err="failed to get container status \"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f\": rpc error: code = NotFound desc = could not find container \"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f\": container with ID starting with a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f not found: ID does not exist" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.558337 4940 scope.go:117] "RemoveContainer" containerID="359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.558879 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a"} err="failed to get container status 
\"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a\": rpc error: code = NotFound desc = could not find container \"359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a\": container with ID starting with 359e6e79df7a698c597251a3f8306903e56fff8006ddb1fa3833a3cfe4fb872a not found: ID does not exist" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.558919 4940 scope.go:117] "RemoveContainer" containerID="a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.559232 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f"} err="failed to get container status \"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f\": rpc error: code = NotFound desc = could not find container \"a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f\": container with ID starting with a7a6bbdb98374ce2802de89bc5d5fd19135617be82abc161cd551ce043aa638f not found: ID does not exist" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.570490 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.581948 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:16 crc kubenswrapper[4940]: E1126 07:16:16.582522 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerName="nova-metadata-metadata" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.582537 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerName="nova-metadata-metadata" Nov 26 07:16:16 crc kubenswrapper[4940]: E1126 07:16:16.582564 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerName="nova-metadata-log" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.582573 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerName="nova-metadata-log" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.583091 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerName="nova-metadata-log" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.583119 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" containerName="nova-metadata-metadata" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.584358 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.586948 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.588616 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.591257 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.705405 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.705488 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b582c8bf-a89b-475e-93da-daaa9619766c-logs\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.706027 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.706125 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-config-data\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.706190 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9wv2\" (UniqueName: \"kubernetes.io/projected/b582c8bf-a89b-475e-93da-daaa9619766c-kube-api-access-l9wv2\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.808480 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.808569 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-config-data\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.808636 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9wv2\" (UniqueName: \"kubernetes.io/projected/b582c8bf-a89b-475e-93da-daaa9619766c-kube-api-access-l9wv2\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " 
pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.808719 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.808833 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b582c8bf-a89b-475e-93da-daaa9619766c-logs\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.809279 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b582c8bf-a89b-475e-93da-daaa9619766c-logs\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.814337 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.814691 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.825001 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-config-data\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.827242 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9wv2\" (UniqueName: \"kubernetes.io/projected/b582c8bf-a89b-475e-93da-daaa9619766c-kube-api-access-l9wv2\") pod \"nova-metadata-0\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " pod="openstack/nova-metadata-0" Nov 26 07:16:16 crc kubenswrapper[4940]: I1126 07:16:16.906490 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:17 crc kubenswrapper[4940]: I1126 07:16:17.177407 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ce9e8c2-9b63-47fe-b8b2-8806638b369e" path="/var/lib/kubelet/pods/8ce9e8c2-9b63-47fe-b8b2-8806638b369e/volumes" Nov 26 07:16:17 crc kubenswrapper[4940]: I1126 07:16:17.426509 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:17 crc kubenswrapper[4940]: W1126 07:16:17.426746 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb582c8bf_a89b_475e_93da_daaa9619766c.slice/crio-173424a574c70c5b6e680bdb46761d6700d0c4445996eff5e6623ba622863540 WatchSource:0}: Error finding container 173424a574c70c5b6e680bdb46761d6700d0c4445996eff5e6623ba622863540: Status 404 returned error can't find the container with id 173424a574c70c5b6e680bdb46761d6700d0c4445996eff5e6623ba622863540 Nov 26 07:16:17 crc kubenswrapper[4940]: I1126 07:16:17.514797 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b582c8bf-a89b-475e-93da-daaa9619766c","Type":"ContainerStarted","Data":"173424a574c70c5b6e680bdb46761d6700d0c4445996eff5e6623ba622863540"} Nov 26 07:16:18 crc kubenswrapper[4940]: I1126 07:16:18.527460 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b582c8bf-a89b-475e-93da-daaa9619766c","Type":"ContainerStarted","Data":"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43"} Nov 26 07:16:18 crc kubenswrapper[4940]: I1126 07:16:18.527936 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b582c8bf-a89b-475e-93da-daaa9619766c","Type":"ContainerStarted","Data":"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4"} Nov 26 07:16:18 crc kubenswrapper[4940]: I1126 07:16:18.545105 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.5450905539999997 podStartE2EDuration="2.545090554s" podCreationTimestamp="2025-11-26 07:16:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:18.54243792 +0000 UTC m=+1280.062579539" watchObservedRunningTime="2025-11-26 07:16:18.545090554 +0000 UTC m=+1280.065232173" Nov 26 07:16:19 crc kubenswrapper[4940]: I1126 07:16:19.539287 4940 generic.go:334] "Generic (PLEG): container finished" podID="0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" containerID="8d83826e05c1cc03071fc239f8b2b62ced78d755990da036c83dd3d12b2e4231" exitCode=0 Nov 26 07:16:19 crc kubenswrapper[4940]: I1126 07:16:19.539384 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j86bw" event={"ID":"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec","Type":"ContainerDied","Data":"8d83826e05c1cc03071fc239f8b2b62ced78d755990da036c83dd3d12b2e4231"} Nov 26 07:16:20 crc kubenswrapper[4940]: I1126 07:16:20.553569 4940 generic.go:334] "Generic (PLEG): container finished" podID="94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" containerID="df777f9d08415a40f6552e65002da2faf05b83dc3817620a46e85b24ddaab1a1" exitCode=0 Nov 26 07:16:20 crc kubenswrapper[4940]: I1126 07:16:20.553679 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-z542t" 
event={"ID":"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a","Type":"ContainerDied","Data":"df777f9d08415a40f6552e65002da2faf05b83dc3817620a46e85b24ddaab1a1"} Nov 26 07:16:20 crc kubenswrapper[4940]: I1126 07:16:20.602878 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:16:20 crc kubenswrapper[4940]: I1126 07:16:20.602938 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:16:20 crc kubenswrapper[4940]: I1126 07:16:20.981266 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:20 crc kubenswrapper[4940]: I1126 07:16:20.996770 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.036256 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.038752 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.104261 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-config-data\") pod \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.104665 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-combined-ca-bundle\") pod \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.104810 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-scripts\") pod \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.104910 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2bvd\" (UniqueName: \"kubernetes.io/projected/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-kube-api-access-t2bvd\") pod \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\" (UID: \"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec\") " Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.111821 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-kube-api-access-t2bvd" (OuterVolumeSpecName: "kube-api-access-t2bvd") pod "0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" (UID: "0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec"). InnerVolumeSpecName "kube-api-access-t2bvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.121254 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-scripts" (OuterVolumeSpecName: "scripts") pod "0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" (UID: "0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.121504 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-797bbc649-7w2t6"] Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.121754 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" podUID="54601667-b13d-49f2-8822-9a8c027da9ab" containerName="dnsmasq-dns" containerID="cri-o://6daed158a52521a75b3a9357fe950a97efec30361d87f114dd62e84dbf9046fd" gracePeriod=10 Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.202170 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-config-data" (OuterVolumeSpecName: "config-data") pod "0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" (UID: "0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.218454 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.218497 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.218515 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2bvd\" (UniqueName: \"kubernetes.io/projected/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-kube-api-access-t2bvd\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.232500 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" (UID: "0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.322676 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.498415 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" podUID="54601667-b13d-49f2-8822-9a8c027da9ab" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.160:5353: connect: connection refused" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.564520 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-j86bw" event={"ID":"0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec","Type":"ContainerDied","Data":"c6fce9afb1a0a60004481148b4dbee51a318e9a83dce290394876aa2b9c4f3b6"} Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.564565 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6fce9afb1a0a60004481148b4dbee51a318e9a83dce290394876aa2b9c4f3b6" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.565540 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-j86bw" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.566454 4940 generic.go:334] "Generic (PLEG): container finished" podID="54601667-b13d-49f2-8822-9a8c027da9ab" containerID="6daed158a52521a75b3a9357fe950a97efec30361d87f114dd62e84dbf9046fd" exitCode=0 Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.566504 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" event={"ID":"54601667-b13d-49f2-8822-9a8c027da9ab","Type":"ContainerDied","Data":"6daed158a52521a75b3a9357fe950a97efec30361d87f114dd62e84dbf9046fd"} Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.606686 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.693285 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.183:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.693453 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.183:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.746199 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.746457 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-log" containerID="cri-o://afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6" gracePeriod=30 Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.746956 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-api" containerID="cri-o://16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc" gracePeriod=30 Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.780620 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.780857 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" containerName="nova-metadata-log" containerID="cri-o://96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4" gracePeriod=30 Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.781530 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" containerName="nova-metadata-metadata" containerID="cri-o://b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43" gracePeriod=30 Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.906786 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:16:21 crc kubenswrapper[4940]: I1126 07:16:21.906830 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 
07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.081494 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.165344 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.184834 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.243795 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-config-data\") pod \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.243836 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-combined-ca-bundle\") pod \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.243996 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlvvd\" (UniqueName: \"kubernetes.io/projected/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-kube-api-access-dlvvd\") pod \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.244100 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-scripts\") pod \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\" (UID: \"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.252142 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-scripts" (OuterVolumeSpecName: "scripts") pod "94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" (UID: "94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.263940 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-kube-api-access-dlvvd" (OuterVolumeSpecName: "kube-api-access-dlvvd") pod "94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" (UID: "94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a"). InnerVolumeSpecName "kube-api-access-dlvvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.294297 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-config-data" (OuterVolumeSpecName: "config-data") pod "94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" (UID: "94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.302950 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" (UID: "94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.345440 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-config\") pod \"54601667-b13d-49f2-8822-9a8c027da9ab\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.345593 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-swift-storage-0\") pod \"54601667-b13d-49f2-8822-9a8c027da9ab\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.345627 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-nb\") pod \"54601667-b13d-49f2-8822-9a8c027da9ab\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.345653 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhjcx\" (UniqueName: \"kubernetes.io/projected/54601667-b13d-49f2-8822-9a8c027da9ab-kube-api-access-hhjcx\") pod \"54601667-b13d-49f2-8822-9a8c027da9ab\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.345719 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-svc\") pod \"54601667-b13d-49f2-8822-9a8c027da9ab\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.345779 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-sb\") pod \"54601667-b13d-49f2-8822-9a8c027da9ab\" (UID: \"54601667-b13d-49f2-8822-9a8c027da9ab\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.346330 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.346349 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.346361 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dlvvd\" (UniqueName: \"kubernetes.io/projected/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-kube-api-access-dlvvd\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.346373 4940 reconciler_common.go:293] "Volume detached for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.350408 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54601667-b13d-49f2-8822-9a8c027da9ab-kube-api-access-hhjcx" (OuterVolumeSpecName: "kube-api-access-hhjcx") pod "54601667-b13d-49f2-8822-9a8c027da9ab" (UID: "54601667-b13d-49f2-8822-9a8c027da9ab"). InnerVolumeSpecName "kube-api-access-hhjcx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.404725 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "54601667-b13d-49f2-8822-9a8c027da9ab" (UID: "54601667-b13d-49f2-8822-9a8c027da9ab"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.419749 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-config" (OuterVolumeSpecName: "config") pod "54601667-b13d-49f2-8822-9a8c027da9ab" (UID: "54601667-b13d-49f2-8822-9a8c027da9ab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.421007 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "54601667-b13d-49f2-8822-9a8c027da9ab" (UID: "54601667-b13d-49f2-8822-9a8c027da9ab"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.422318 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.432026 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "54601667-b13d-49f2-8822-9a8c027da9ab" (UID: "54601667-b13d-49f2-8822-9a8c027da9ab"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.445978 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "54601667-b13d-49f2-8822-9a8c027da9ab" (UID: "54601667-b13d-49f2-8822-9a8c027da9ab"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.448323 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.448364 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhjcx\" (UniqueName: \"kubernetes.io/projected/54601667-b13d-49f2-8822-9a8c027da9ab-kube-api-access-hhjcx\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.448376 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.448388 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.448399 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.448407 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54601667-b13d-49f2-8822-9a8c027da9ab-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.549777 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b582c8bf-a89b-475e-93da-daaa9619766c-logs\") pod \"b582c8bf-a89b-475e-93da-daaa9619766c\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.549847 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-nova-metadata-tls-certs\") pod \"b582c8bf-a89b-475e-93da-daaa9619766c\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.549913 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-combined-ca-bundle\") pod \"b582c8bf-a89b-475e-93da-daaa9619766c\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.550031 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b582c8bf-a89b-475e-93da-daaa9619766c-logs" (OuterVolumeSpecName: "logs") pod "b582c8bf-a89b-475e-93da-daaa9619766c" (UID: "b582c8bf-a89b-475e-93da-daaa9619766c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.550068 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9wv2\" (UniqueName: \"kubernetes.io/projected/b582c8bf-a89b-475e-93da-daaa9619766c-kube-api-access-l9wv2\") pod \"b582c8bf-a89b-475e-93da-daaa9619766c\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.550097 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-config-data\") pod \"b582c8bf-a89b-475e-93da-daaa9619766c\" (UID: \"b582c8bf-a89b-475e-93da-daaa9619766c\") " Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.550829 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b582c8bf-a89b-475e-93da-daaa9619766c-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.554177 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b582c8bf-a89b-475e-93da-daaa9619766c-kube-api-access-l9wv2" (OuterVolumeSpecName: "kube-api-access-l9wv2") pod "b582c8bf-a89b-475e-93da-daaa9619766c" (UID: "b582c8bf-a89b-475e-93da-daaa9619766c"). InnerVolumeSpecName "kube-api-access-l9wv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.582205 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b582c8bf-a89b-475e-93da-daaa9619766c" (UID: "b582c8bf-a89b-475e-93da-daaa9619766c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.584530 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" event={"ID":"54601667-b13d-49f2-8822-9a8c027da9ab","Type":"ContainerDied","Data":"06f683e18b2a1c557d06f4ff29d6f71e08bc0fb3ce34b94834d533aace5305c5"} Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.584586 4940 scope.go:117] "RemoveContainer" containerID="6daed158a52521a75b3a9357fe950a97efec30361d87f114dd62e84dbf9046fd" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.584747 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-797bbc649-7w2t6" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.587608 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-config-data" (OuterVolumeSpecName: "config-data") pod "b582c8bf-a89b-475e-93da-daaa9619766c" (UID: "b582c8bf-a89b-475e-93da-daaa9619766c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.588590 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-z542t" event={"ID":"94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a","Type":"ContainerDied","Data":"98183ab9152702501a8c81deda3e8689a42ded7bbdc5f1fd974ecfc14027c95a"} Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.588629 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98183ab9152702501a8c81deda3e8689a42ded7bbdc5f1fd974ecfc14027c95a" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.588694 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-z542t" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.596863 4940 generic.go:334] "Generic (PLEG): container finished" podID="b582c8bf-a89b-475e-93da-daaa9619766c" containerID="b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43" exitCode=0 Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.596891 4940 generic.go:334] "Generic (PLEG): container finished" podID="b582c8bf-a89b-475e-93da-daaa9619766c" containerID="96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4" exitCode=143 Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.596933 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b582c8bf-a89b-475e-93da-daaa9619766c","Type":"ContainerDied","Data":"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43"} Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.596964 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b582c8bf-a89b-475e-93da-daaa9619766c","Type":"ContainerDied","Data":"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4"} Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.596978 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b582c8bf-a89b-475e-93da-daaa9619766c","Type":"ContainerDied","Data":"173424a574c70c5b6e680bdb46761d6700d0c4445996eff5e6623ba622863540"} Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.597032 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.608998 4940 generic.go:334] "Generic (PLEG): container finished" podID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerID="afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6" exitCode=143 Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.610339 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5a19a332-3a3d-4c32-89ee-efebcf6b09c5","Type":"ContainerDied","Data":"afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6"} Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.621139 4940 scope.go:117] "RemoveContainer" containerID="4f89de5a9c0ac53d4f1cb615a14cdae9084088d4bd6ad5281f6799ca7a6af6e5" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.648033 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "b582c8bf-a89b-475e-93da-daaa9619766c" (UID: "b582c8bf-a89b-475e-93da-daaa9619766c"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.654007 4940 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.654106 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.654119 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9wv2\" (UniqueName: \"kubernetes.io/projected/b582c8bf-a89b-475e-93da-daaa9619766c-kube-api-access-l9wv2\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.654130 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b582c8bf-a89b-475e-93da-daaa9619766c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.658541 4940 scope.go:117] "RemoveContainer" containerID="b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.666187 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-797bbc649-7w2t6"] Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.680280 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-797bbc649-7w2t6"] Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.689515 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.689976 4940 scope.go:117] "RemoveContainer" containerID="96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4" Nov 26 07:16:22 crc kubenswrapper[4940]: E1126 07:16:22.690194 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54601667-b13d-49f2-8822-9a8c027da9ab" containerName="dnsmasq-dns" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690217 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="54601667-b13d-49f2-8822-9a8c027da9ab" containerName="dnsmasq-dns" Nov 26 07:16:22 crc kubenswrapper[4940]: E1126 07:16:22.690235 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" containerName="nova-metadata-metadata" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690240 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" containerName="nova-metadata-metadata" Nov 26 07:16:22 crc kubenswrapper[4940]: E1126 07:16:22.690260 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" containerName="nova-manage" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690267 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" containerName="nova-manage" Nov 26 07:16:22 crc kubenswrapper[4940]: E1126 07:16:22.690277 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" containerName="nova-cell1-conductor-db-sync" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690283 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" containerName="nova-cell1-conductor-db-sync" Nov 26 07:16:22 crc kubenswrapper[4940]: E1126 07:16:22.690296 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" containerName="nova-metadata-log" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690302 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" containerName="nova-metadata-log" Nov 26 07:16:22 crc kubenswrapper[4940]: E1126 07:16:22.690316 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54601667-b13d-49f2-8822-9a8c027da9ab" containerName="init" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690321 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="54601667-b13d-49f2-8822-9a8c027da9ab" containerName="init" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690481 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" containerName="nova-metadata-metadata" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690496 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" containerName="nova-metadata-log" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690508 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" containerName="nova-cell1-conductor-db-sync" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690529 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" containerName="nova-manage" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.690537 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="54601667-b13d-49f2-8822-9a8c027da9ab" containerName="dnsmasq-dns" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.691191 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.693590 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.698937 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.734514 4940 scope.go:117] "RemoveContainer" containerID="b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43" Nov 26 07:16:22 crc kubenswrapper[4940]: E1126 07:16:22.734928 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43\": container with ID starting with b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43 not found: ID does not exist" containerID="b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.734957 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43"} err="failed to get container status \"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43\": rpc error: code = NotFound desc = could not find container \"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43\": container with ID starting with b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43 not found: ID does not exist" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.734982 4940 scope.go:117] "RemoveContainer" containerID="96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4" Nov 26 07:16:22 crc kubenswrapper[4940]: E1126 07:16:22.739145 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4\": container with ID starting with 96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4 not found: ID does not exist" containerID="96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.739199 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4"} err="failed to get container status \"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4\": rpc error: code = NotFound desc = could not find container \"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4\": container with ID starting with 96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4 not found: ID does not exist" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.739219 4940 scope.go:117] "RemoveContainer" containerID="b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.739528 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43"} err="failed to get container status \"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43\": rpc error: code = NotFound desc = could not find container \"b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43\": container with ID starting with 
b2c76340dd4f95d20b438ca68e9e755bcaeff4c86eb120fe300c7b9d837cbf43 not found: ID does not exist" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.739549 4940 scope.go:117] "RemoveContainer" containerID="96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.739753 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4"} err="failed to get container status \"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4\": rpc error: code = NotFound desc = could not find container \"96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4\": container with ID starting with 96761b278caa83c65eed7628892699f78ae234456710d5b179ecc95d9b0845f4 not found: ID does not exist" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.857954 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7rmr\" (UniqueName: \"kubernetes.io/projected/160032d4-a9c0-4b2c-be8b-f4a5c188c451-kube-api-access-d7rmr\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.858082 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.858111 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.961114 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.961372 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.961622 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7rmr\" (UniqueName: \"kubernetes.io/projected/160032d4-a9c0-4b2c-be8b-f4a5c188c451-kube-api-access-d7rmr\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.966434 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " 
pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.967364 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.980824 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7rmr\" (UniqueName: \"kubernetes.io/projected/160032d4-a9c0-4b2c-be8b-f4a5c188c451-kube-api-access-d7rmr\") pod \"nova-cell1-conductor-0\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.981025 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:22 crc kubenswrapper[4940]: I1126 07:16:22.996963 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.006273 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.007764 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.011393 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.011657 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.025867 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.032604 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.174758 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-config-data\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.174800 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-logs\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.174843 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.174911 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.174929 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w48qb\" (UniqueName: \"kubernetes.io/projected/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-kube-api-access-w48qb\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.185630 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54601667-b13d-49f2-8822-9a8c027da9ab" path="/var/lib/kubelet/pods/54601667-b13d-49f2-8822-9a8c027da9ab/volumes" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.186625 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b582c8bf-a89b-475e-93da-daaa9619766c" path="/var/lib/kubelet/pods/b582c8bf-a89b-475e-93da-daaa9619766c/volumes" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.277577 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.277711 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.277730 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w48qb\" (UniqueName: \"kubernetes.io/projected/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-kube-api-access-w48qb\") pod \"nova-metadata-0\" (UID: 
\"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.277805 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-config-data\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.277835 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-logs\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.278392 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-logs\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.282256 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.283357 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-config-data\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.292422 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.302733 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w48qb\" (UniqueName: \"kubernetes.io/projected/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-kube-api-access-w48qb\") pod \"nova-metadata-0\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.480869 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.520705 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 07:16:23 crc kubenswrapper[4940]: W1126 07:16:23.524367 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod160032d4_a9c0_4b2c_be8b_f4a5c188c451.slice/crio-c2592e76eb31ed86574bb5b71225ff13cf6c9900286471a8f7b4496fe66556ed WatchSource:0}: Error finding container c2592e76eb31ed86574bb5b71225ff13cf6c9900286471a8f7b4496fe66556ed: Status 404 returned error can't find the container with id c2592e76eb31ed86574bb5b71225ff13cf6c9900286471a8f7b4496fe66556ed Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.629564 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"160032d4-a9c0-4b2c-be8b-f4a5c188c451","Type":"ContainerStarted","Data":"c2592e76eb31ed86574bb5b71225ff13cf6c9900286471a8f7b4496fe66556ed"} Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.632217 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="aebd0349-6a1d-4bc3-a74f-94907dab5a4e" containerName="nova-scheduler-scheduler" containerID="cri-o://dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c" gracePeriod=30 Nov 26 07:16:23 crc kubenswrapper[4940]: W1126 07:16:23.929250 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2752e9c9_51d4_4e2c_9f07_c87e36d243ee.slice/crio-f4235c870573402e91de3990cae0827d3b62e4889b16b999936cd3f2f2f49080 WatchSource:0}: Error finding container f4235c870573402e91de3990cae0827d3b62e4889b16b999936cd3f2f2f49080: Status 404 returned error can't find the container with id f4235c870573402e91de3990cae0827d3b62e4889b16b999936cd3f2f2f49080 Nov 26 07:16:23 crc kubenswrapper[4940]: I1126 07:16:23.930722 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:16:24 crc kubenswrapper[4940]: I1126 07:16:24.640831 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"160032d4-a9c0-4b2c-be8b-f4a5c188c451","Type":"ContainerStarted","Data":"af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731"} Nov 26 07:16:24 crc kubenswrapper[4940]: I1126 07:16:24.641331 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:24 crc kubenswrapper[4940]: I1126 07:16:24.643930 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2752e9c9-51d4-4e2c-9f07-c87e36d243ee","Type":"ContainerStarted","Data":"20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf"} Nov 26 07:16:24 crc kubenswrapper[4940]: I1126 07:16:24.644087 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2752e9c9-51d4-4e2c-9f07-c87e36d243ee","Type":"ContainerStarted","Data":"c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7"} Nov 26 07:16:24 crc kubenswrapper[4940]: I1126 07:16:24.644168 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2752e9c9-51d4-4e2c-9f07-c87e36d243ee","Type":"ContainerStarted","Data":"f4235c870573402e91de3990cae0827d3b62e4889b16b999936cd3f2f2f49080"} Nov 26 07:16:24 crc kubenswrapper[4940]: I1126 
07:16:24.665178 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.665160038 podStartE2EDuration="2.665160038s" podCreationTimestamp="2025-11-26 07:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:24.660215621 +0000 UTC m=+1286.180357250" watchObservedRunningTime="2025-11-26 07:16:24.665160038 +0000 UTC m=+1286.185301657" Nov 26 07:16:26 crc kubenswrapper[4940]: E1126 07:16:25.999957 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:16:26 crc kubenswrapper[4940]: E1126 07:16:26.001994 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:16:26 crc kubenswrapper[4940]: E1126 07:16:26.003812 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:16:26 crc kubenswrapper[4940]: E1126 07:16:26.003922 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="aebd0349-6a1d-4bc3-a74f-94907dab5a4e" containerName="nova-scheduler-scheduler" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.522422 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.544978 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.544954627 podStartE2EDuration="4.544954627s" podCreationTimestamp="2025-11-26 07:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:24.688287601 +0000 UTC m=+1286.208429230" watchObservedRunningTime="2025-11-26 07:16:26.544954627 +0000 UTC m=+1288.065096246" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.645396 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-config-data\") pod \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.645641 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k89rf\" (UniqueName: \"kubernetes.io/projected/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-kube-api-access-k89rf\") pod \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.645746 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-combined-ca-bundle\") pod \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\" (UID: \"aebd0349-6a1d-4bc3-a74f-94907dab5a4e\") " Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.653893 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-kube-api-access-k89rf" (OuterVolumeSpecName: "kube-api-access-k89rf") pod "aebd0349-6a1d-4bc3-a74f-94907dab5a4e" (UID: "aebd0349-6a1d-4bc3-a74f-94907dab5a4e"). InnerVolumeSpecName "kube-api-access-k89rf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.672244 4940 generic.go:334] "Generic (PLEG): container finished" podID="aebd0349-6a1d-4bc3-a74f-94907dab5a4e" containerID="dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c" exitCode=0 Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.672298 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"aebd0349-6a1d-4bc3-a74f-94907dab5a4e","Type":"ContainerDied","Data":"dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c"} Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.672329 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"aebd0349-6a1d-4bc3-a74f-94907dab5a4e","Type":"ContainerDied","Data":"a78f3e55b926f533535dee0934a60f23fa27663753eabe67a70c17278f968963"} Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.672349 4940 scope.go:117] "RemoveContainer" containerID="dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.672532 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.675930 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-config-data" (OuterVolumeSpecName: "config-data") pod "aebd0349-6a1d-4bc3-a74f-94907dab5a4e" (UID: "aebd0349-6a1d-4bc3-a74f-94907dab5a4e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.687661 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aebd0349-6a1d-4bc3-a74f-94907dab5a4e" (UID: "aebd0349-6a1d-4bc3-a74f-94907dab5a4e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.747363 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k89rf\" (UniqueName: \"kubernetes.io/projected/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-kube-api-access-k89rf\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.747397 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.747407 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebd0349-6a1d-4bc3-a74f-94907dab5a4e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.762816 4940 scope.go:117] "RemoveContainer" containerID="dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c" Nov 26 07:16:26 crc kubenswrapper[4940]: E1126 07:16:26.763278 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c\": container with ID starting with dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c not found: ID does not exist" containerID="dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.763336 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c"} err="failed to get container status \"dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c\": rpc error: code = NotFound desc = could not find container \"dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c\": container with ID starting with dfc0c0b5f0d058875c8ddc456ef08c36f6884d4edcaf754f16ca50668bf92c8c not found: ID does not exist" Nov 26 07:16:26 crc kubenswrapper[4940]: I1126 07:16:26.999610 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.011847 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.049187 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:27 crc kubenswrapper[4940]: E1126 07:16:27.050698 4940 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="aebd0349-6a1d-4bc3-a74f-94907dab5a4e" containerName="nova-scheduler-scheduler" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.050721 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="aebd0349-6a1d-4bc3-a74f-94907dab5a4e" containerName="nova-scheduler-scheduler" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.051123 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="aebd0349-6a1d-4bc3-a74f-94907dab5a4e" containerName="nova-scheduler-scheduler" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.052157 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.056264 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.066980 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-config-data\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.067204 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwkqf\" (UniqueName: \"kubernetes.io/projected/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-kube-api-access-nwkqf\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.069602 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.076654 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.172938 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.173096 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-config-data\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.173122 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwkqf\" (UniqueName: \"kubernetes.io/projected/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-kube-api-access-nwkqf\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.177678 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aebd0349-6a1d-4bc3-a74f-94907dab5a4e" path="/var/lib/kubelet/pods/aebd0349-6a1d-4bc3-a74f-94907dab5a4e/volumes" Nov 26 07:16:27 crc 
kubenswrapper[4940]: I1126 07:16:27.178264 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-config-data\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.184800 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.191574 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwkqf\" (UniqueName: \"kubernetes.io/projected/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-kube-api-access-nwkqf\") pod \"nova-scheduler-0\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.377628 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.558821 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.682936 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-logs\") pod \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.682985 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-config-data\") pod \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.683077 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cp9rf\" (UniqueName: \"kubernetes.io/projected/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-kube-api-access-cp9rf\") pod \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.683118 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-combined-ca-bundle\") pod \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\" (UID: \"5a19a332-3a3d-4c32-89ee-efebcf6b09c5\") " Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.684901 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-logs" (OuterVolumeSpecName: "logs") pod "5a19a332-3a3d-4c32-89ee-efebcf6b09c5" (UID: "5a19a332-3a3d-4c32-89ee-efebcf6b09c5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.685330 4940 generic.go:334] "Generic (PLEG): container finished" podID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerID="16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc" exitCode=0 Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.685391 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5a19a332-3a3d-4c32-89ee-efebcf6b09c5","Type":"ContainerDied","Data":"16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc"} Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.685418 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5a19a332-3a3d-4c32-89ee-efebcf6b09c5","Type":"ContainerDied","Data":"c3d7825e7018d5c33f8bec76d82c4a55f379179c090e6368839ac55cd2b2bc4a"} Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.685434 4940 scope.go:117] "RemoveContainer" containerID="16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.685548 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.688917 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-kube-api-access-cp9rf" (OuterVolumeSpecName: "kube-api-access-cp9rf") pod "5a19a332-3a3d-4c32-89ee-efebcf6b09c5" (UID: "5a19a332-3a3d-4c32-89ee-efebcf6b09c5"). InnerVolumeSpecName "kube-api-access-cp9rf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.709980 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-config-data" (OuterVolumeSpecName: "config-data") pod "5a19a332-3a3d-4c32-89ee-efebcf6b09c5" (UID: "5a19a332-3a3d-4c32-89ee-efebcf6b09c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.718017 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a19a332-3a3d-4c32-89ee-efebcf6b09c5" (UID: "5a19a332-3a3d-4c32-89ee-efebcf6b09c5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.745311 4940 scope.go:117] "RemoveContainer" containerID="afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.761508 4940 scope.go:117] "RemoveContainer" containerID="16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc" Nov 26 07:16:27 crc kubenswrapper[4940]: E1126 07:16:27.761940 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc\": container with ID starting with 16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc not found: ID does not exist" containerID="16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.761997 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc"} err="failed to get container status \"16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc\": rpc error: code = NotFound desc = could not find container \"16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc\": container with ID starting with 16f6b0b29ade1e1305ed2951b1a2b827ed3773eacf74c5423311bd679a3606bc not found: ID does not exist" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.762056 4940 scope.go:117] "RemoveContainer" containerID="afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6" Nov 26 07:16:27 crc kubenswrapper[4940]: E1126 07:16:27.762476 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6\": container with ID starting with afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6 not found: ID does not exist" containerID="afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.762504 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6"} err="failed to get container status \"afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6\": rpc error: code = NotFound desc = could not find container \"afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6\": container with ID starting with afb99636ea0154013959e0c4dd439b7e1d1d5ab1cde138e003859541cacea8d6 not found: ID does not exist" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.787086 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.787133 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.787150 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cp9rf\" (UniqueName: \"kubernetes.io/projected/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-kube-api-access-cp9rf\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 
07:16:27.787163 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a19a332-3a3d-4c32-89ee-efebcf6b09c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:27 crc kubenswrapper[4940]: I1126 07:16:27.844687 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:16:27 crc kubenswrapper[4940]: W1126 07:16:27.851542 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fbd518d_ce50_4811_8dc1_74d12a09eb8c.slice/crio-e8c4bb20c71fd4dd8f9333094478285cf8decb6846cf6cb748563bda36c0160a WatchSource:0}: Error finding container e8c4bb20c71fd4dd8f9333094478285cf8decb6846cf6cb748563bda36c0160a: Status 404 returned error can't find the container with id e8c4bb20c71fd4dd8f9333094478285cf8decb6846cf6cb748563bda36c0160a Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.025337 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.045178 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.059330 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:28 crc kubenswrapper[4940]: E1126 07:16:28.064298 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-api" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.064761 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-api" Nov 26 07:16:28 crc kubenswrapper[4940]: E1126 07:16:28.064907 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-log" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.064993 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-log" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.065562 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-log" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.065665 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" containerName="nova-api-api" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.066899 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.070447 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.078741 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.096843 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cfb4e54-3927-4275-930d-4922b57f7c81-logs\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.097144 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cdlb\" (UniqueName: \"kubernetes.io/projected/4cfb4e54-3927-4275-930d-4922b57f7c81-kube-api-access-9cdlb\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.097258 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.097359 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-config-data\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.165460 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.198128 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cfb4e54-3927-4275-930d-4922b57f7c81-logs\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.198357 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.198482 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cdlb\" (UniqueName: \"kubernetes.io/projected/4cfb4e54-3927-4275-930d-4922b57f7c81-kube-api-access-9cdlb\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.198599 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-config-data\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.199882 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cfb4e54-3927-4275-930d-4922b57f7c81-logs\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.204285 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.204623 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-config-data\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.218716 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cdlb\" (UniqueName: \"kubernetes.io/projected/4cfb4e54-3927-4275-930d-4922b57f7c81-kube-api-access-9cdlb\") pod \"nova-api-0\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.411363 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.481568 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.481682 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.712373 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fbd518d-ce50-4811-8dc1-74d12a09eb8c","Type":"ContainerStarted","Data":"55a9ba5541c0f40f9c714ac19a220739b7c722566d746d2644ec3d3b672ef513"} Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.712708 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fbd518d-ce50-4811-8dc1-74d12a09eb8c","Type":"ContainerStarted","Data":"e8c4bb20c71fd4dd8f9333094478285cf8decb6846cf6cb748563bda36c0160a"} Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.747005 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.746982026 podStartE2EDuration="1.746982026s" podCreationTimestamp="2025-11-26 07:16:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:28.739911383 +0000 UTC m=+1290.260053002" watchObservedRunningTime="2025-11-26 07:16:28.746982026 +0000 UTC m=+1290.267123665" Nov 26 07:16:28 crc kubenswrapper[4940]: I1126 07:16:28.955058 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:28 crc kubenswrapper[4940]: W1126 07:16:28.956322 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cfb4e54_3927_4275_930d_4922b57f7c81.slice/crio-d26dbdf1158592ee4873cbefc9fe43ea412c0c8870112ec0c3c7177b78b51d02 WatchSource:0}: Error finding container d26dbdf1158592ee4873cbefc9fe43ea412c0c8870112ec0c3c7177b78b51d02: Status 404 returned error can't find the container with id 
d26dbdf1158592ee4873cbefc9fe43ea412c0c8870112ec0c3c7177b78b51d02 Nov 26 07:16:29 crc kubenswrapper[4940]: I1126 07:16:29.213326 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a19a332-3a3d-4c32-89ee-efebcf6b09c5" path="/var/lib/kubelet/pods/5a19a332-3a3d-4c32-89ee-efebcf6b09c5/volumes" Nov 26 07:16:29 crc kubenswrapper[4940]: I1126 07:16:29.727794 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4cfb4e54-3927-4275-930d-4922b57f7c81","Type":"ContainerStarted","Data":"b0e53e491751fb29c142c49d72a4d6c4f53ce3a88478fa258f5ccfac8147f328"} Nov 26 07:16:29 crc kubenswrapper[4940]: I1126 07:16:29.728093 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4cfb4e54-3927-4275-930d-4922b57f7c81","Type":"ContainerStarted","Data":"34cc3b8ae2b6091c706beb0cb19a74c1b0fcf21737cf22107d472b58438c8454"} Nov 26 07:16:29 crc kubenswrapper[4940]: I1126 07:16:29.728103 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4cfb4e54-3927-4275-930d-4922b57f7c81","Type":"ContainerStarted","Data":"d26dbdf1158592ee4873cbefc9fe43ea412c0c8870112ec0c3c7177b78b51d02"} Nov 26 07:16:29 crc kubenswrapper[4940]: I1126 07:16:29.750867 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.750849175 podStartE2EDuration="1.750849175s" podCreationTimestamp="2025-11-26 07:16:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:29.74500066 +0000 UTC m=+1291.265142289" watchObservedRunningTime="2025-11-26 07:16:29.750849175 +0000 UTC m=+1291.270990794" Nov 26 07:16:32 crc kubenswrapper[4940]: I1126 07:16:32.378665 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 07:16:33 crc kubenswrapper[4940]: I1126 07:16:33.481518 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 07:16:33 crc kubenswrapper[4940]: I1126 07:16:33.481595 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 07:16:34 crc kubenswrapper[4940]: I1126 07:16:34.495172 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:16:34 crc kubenswrapper[4940]: I1126 07:16:34.495206 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:16:37 crc kubenswrapper[4940]: I1126 07:16:37.378410 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 07:16:37 crc kubenswrapper[4940]: I1126 07:16:37.407659 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 07:16:37 crc kubenswrapper[4940]: I1126 07:16:37.813435 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 07:16:38 crc 
kubenswrapper[4940]: I1126 07:16:38.412761 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:16:38 crc kubenswrapper[4940]: I1126 07:16:38.413813 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:16:39 crc kubenswrapper[4940]: I1126 07:16:39.494405 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 07:16:39 crc kubenswrapper[4940]: I1126 07:16:39.494797 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 07:16:43 crc kubenswrapper[4940]: I1126 07:16:43.486250 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 07:16:43 crc kubenswrapper[4940]: I1126 07:16:43.487215 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 07:16:43 crc kubenswrapper[4940]: I1126 07:16:43.491758 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 07:16:43 crc kubenswrapper[4940]: I1126 07:16:43.876831 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.857502 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.890167 4940 generic.go:334] "Generic (PLEG): container finished" podID="ba46c5dd-ffdd-46e9-9104-c1d48aa61427" containerID="a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c" exitCode=137 Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.890240 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.890500 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ba46c5dd-ffdd-46e9-9104-c1d48aa61427","Type":"ContainerDied","Data":"a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c"} Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.890674 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ba46c5dd-ffdd-46e9-9104-c1d48aa61427","Type":"ContainerDied","Data":"00cec1c69d474ea32ca60cd2b8eec58c83a15b03bc436aaa01fa5a9e66c5848d"} Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.890739 4940 scope.go:117] "RemoveContainer" containerID="a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c" Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.915847 4940 scope.go:117] "RemoveContainer" containerID="a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c" Nov 26 07:16:45 crc kubenswrapper[4940]: E1126 07:16:45.916408 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c\": container with ID starting with a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c not found: ID does not exist" containerID="a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c" Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.916439 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c"} err="failed to get container status \"a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c\": rpc error: code = NotFound desc = could not find container \"a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c\": container with ID starting with a739f7d6957e34329d233af8b755605567a045ec0c8d1b981dfa8d57fcd20a7c not found: ID does not exist" Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.950216 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-combined-ca-bundle\") pod \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.950382 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlmgs\" (UniqueName: \"kubernetes.io/projected/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-kube-api-access-zlmgs\") pod \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.950488 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-config-data\") pod \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\" (UID: \"ba46c5dd-ffdd-46e9-9104-c1d48aa61427\") " Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.956823 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-kube-api-access-zlmgs" (OuterVolumeSpecName: "kube-api-access-zlmgs") pod "ba46c5dd-ffdd-46e9-9104-c1d48aa61427" (UID: "ba46c5dd-ffdd-46e9-9104-c1d48aa61427"). 
InnerVolumeSpecName "kube-api-access-zlmgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.980941 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba46c5dd-ffdd-46e9-9104-c1d48aa61427" (UID: "ba46c5dd-ffdd-46e9-9104-c1d48aa61427"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:45 crc kubenswrapper[4940]: I1126 07:16:45.981937 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-config-data" (OuterVolumeSpecName: "config-data") pod "ba46c5dd-ffdd-46e9-9104-c1d48aa61427" (UID: "ba46c5dd-ffdd-46e9-9104-c1d48aa61427"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.053026 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.053091 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlmgs\" (UniqueName: \"kubernetes.io/projected/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-kube-api-access-zlmgs\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.053107 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba46c5dd-ffdd-46e9-9104-c1d48aa61427-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.224907 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.232610 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.262861 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:46 crc kubenswrapper[4940]: E1126 07:16:46.263367 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba46c5dd-ffdd-46e9-9104-c1d48aa61427" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.263393 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba46c5dd-ffdd-46e9-9104-c1d48aa61427" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.263640 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba46c5dd-ffdd-46e9-9104-c1d48aa61427" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.264523 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.267379 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.267653 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.272176 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.274139 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.358128 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.358437 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.358518 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.358624 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rztjt\" (UniqueName: \"kubernetes.io/projected/90360054-700b-4de8-9f51-f9b19cde50e0-kube-api-access-rztjt\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.358727 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.460358 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rztjt\" (UniqueName: \"kubernetes.io/projected/90360054-700b-4de8-9f51-f9b19cde50e0-kube-api-access-rztjt\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.460659 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 
26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.460747 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.460794 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.460817 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.463946 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.463946 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.464598 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.464662 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.476342 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rztjt\" (UniqueName: \"kubernetes.io/projected/90360054-700b-4de8-9f51-f9b19cde50e0-kube-api-access-rztjt\") pod \"nova-cell1-novncproxy-0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:46 crc kubenswrapper[4940]: I1126 07:16:46.596158 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:47 crc kubenswrapper[4940]: I1126 07:16:47.065120 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:16:47 crc kubenswrapper[4940]: W1126 07:16:47.065219 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90360054_700b_4de8_9f51_f9b19cde50e0.slice/crio-169a79d6843b929d05adb9ff8717c20874f5ae70bc665e62df1f65007ff60e50 WatchSource:0}: Error finding container 169a79d6843b929d05adb9ff8717c20874f5ae70bc665e62df1f65007ff60e50: Status 404 returned error can't find the container with id 169a79d6843b929d05adb9ff8717c20874f5ae70bc665e62df1f65007ff60e50 Nov 26 07:16:47 crc kubenswrapper[4940]: I1126 07:16:47.178763 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba46c5dd-ffdd-46e9-9104-c1d48aa61427" path="/var/lib/kubelet/pods/ba46c5dd-ffdd-46e9-9104-c1d48aa61427/volumes" Nov 26 07:16:47 crc kubenswrapper[4940]: I1126 07:16:47.910908 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"90360054-700b-4de8-9f51-f9b19cde50e0","Type":"ContainerStarted","Data":"a5e3b511393841e8f4d387df3c57cc2eeb6640159bbbae1ce4b53392fe9b9546"} Nov 26 07:16:47 crc kubenswrapper[4940]: I1126 07:16:47.911368 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"90360054-700b-4de8-9f51-f9b19cde50e0","Type":"ContainerStarted","Data":"169a79d6843b929d05adb9ff8717c20874f5ae70bc665e62df1f65007ff60e50"} Nov 26 07:16:47 crc kubenswrapper[4940]: I1126 07:16:47.932756 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.932735171 podStartE2EDuration="1.932735171s" podCreationTimestamp="2025-11-26 07:16:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:47.93239662 +0000 UTC m=+1309.452538239" watchObservedRunningTime="2025-11-26 07:16:47.932735171 +0000 UTC m=+1309.452876790" Nov 26 07:16:48 crc kubenswrapper[4940]: I1126 07:16:48.417442 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 07:16:48 crc kubenswrapper[4940]: I1126 07:16:48.418354 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 07:16:48 crc kubenswrapper[4940]: I1126 07:16:48.418464 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 07:16:48 crc kubenswrapper[4940]: I1126 07:16:48.424352 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 07:16:48 crc kubenswrapper[4940]: I1126 07:16:48.920002 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 07:16:48 crc kubenswrapper[4940]: I1126 07:16:48.923165 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.133789 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d7f54fb65-6vndb"] Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.137887 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.192527 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7f54fb65-6vndb"] Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.221088 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-nb\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.221156 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-config\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.221279 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-swift-storage-0\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.221379 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbpb7\" (UniqueName: \"kubernetes.io/projected/0a428662-7264-4abe-837b-64739810c829-kube-api-access-xbpb7\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.221440 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-svc\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.221485 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-sb\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.323239 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-nb\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.323291 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-config\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.323356 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-swift-storage-0\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.323391 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbpb7\" (UniqueName: \"kubernetes.io/projected/0a428662-7264-4abe-837b-64739810c829-kube-api-access-xbpb7\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.323427 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-svc\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.323454 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-sb\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.324330 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-nb\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.324347 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-swift-storage-0\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.324519 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-sb\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.324893 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-config\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.326601 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-svc\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.358160 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbpb7\" (UniqueName: 
\"kubernetes.io/projected/0a428662-7264-4abe-837b-64739810c829-kube-api-access-xbpb7\") pod \"dnsmasq-dns-5d7f54fb65-6vndb\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:49 crc kubenswrapper[4940]: I1126 07:16:49.467587 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:50 crc kubenswrapper[4940]: I1126 07:16:50.071751 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7f54fb65-6vndb"] Nov 26 07:16:50 crc kubenswrapper[4940]: W1126 07:16:50.078179 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a428662_7264_4abe_837b_64739810c829.slice/crio-a472b77a91c351dc13cd9608ed15a6894a9a22674310fb4bfe746ed6d70c4082 WatchSource:0}: Error finding container a472b77a91c351dc13cd9608ed15a6894a9a22674310fb4bfe746ed6d70c4082: Status 404 returned error can't find the container with id a472b77a91c351dc13cd9608ed15a6894a9a22674310fb4bfe746ed6d70c4082 Nov 26 07:16:50 crc kubenswrapper[4940]: I1126 07:16:50.941634 4940 generic.go:334] "Generic (PLEG): container finished" podID="0a428662-7264-4abe-837b-64739810c829" containerID="8bb22126f7e8fb3758222b160d460d1e7ad358b951ff13819a57073af26e80b8" exitCode=0 Nov 26 07:16:50 crc kubenswrapper[4940]: I1126 07:16:50.941690 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" event={"ID":"0a428662-7264-4abe-837b-64739810c829","Type":"ContainerDied","Data":"8bb22126f7e8fb3758222b160d460d1e7ad358b951ff13819a57073af26e80b8"} Nov 26 07:16:50 crc kubenswrapper[4940]: I1126 07:16:50.942052 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" event={"ID":"0a428662-7264-4abe-837b-64739810c829","Type":"ContainerStarted","Data":"a472b77a91c351dc13cd9608ed15a6894a9a22674310fb4bfe746ed6d70c4082"} Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.401564 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.402203 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="ceilometer-central-agent" containerID="cri-o://33b1f8b85b8ab7ad46ce6f250c0fbfd265e172b17ae91c9d1e8b3193786e3da4" gracePeriod=30 Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.402317 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="proxy-httpd" containerID="cri-o://437d7372e0e4cf0691257be8373b33c9a94ddf4bba17e1269e87c1dcf7257e08" gracePeriod=30 Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.402359 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="sg-core" containerID="cri-o://8ab10c09df33d5a580ad2893f4497a7ba5796e1c50928856e44602f3b20c6568" gracePeriod=30 Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.402391 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="ceilometer-notification-agent" containerID="cri-o://e2b67954ae2a8283b16c7c53487cd1aaef4a4c26e1d91c7d7291bd5a523b56c8" gracePeriod=30 Nov 26 07:16:51 
crc kubenswrapper[4940]: I1126 07:16:51.600113 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.952511 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" event={"ID":"0a428662-7264-4abe-837b-64739810c829","Type":"ContainerStarted","Data":"05b9781e441ba29ec642551555aa92180638341b151ecdd169347a950574b1fc"} Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.952879 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.955576 4940 generic.go:334] "Generic (PLEG): container finished" podID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerID="437d7372e0e4cf0691257be8373b33c9a94ddf4bba17e1269e87c1dcf7257e08" exitCode=0 Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.955602 4940 generic.go:334] "Generic (PLEG): container finished" podID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerID="8ab10c09df33d5a580ad2893f4497a7ba5796e1c50928856e44602f3b20c6568" exitCode=2 Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.955609 4940 generic.go:334] "Generic (PLEG): container finished" podID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerID="33b1f8b85b8ab7ad46ce6f250c0fbfd265e172b17ae91c9d1e8b3193786e3da4" exitCode=0 Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.955626 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerDied","Data":"437d7372e0e4cf0691257be8373b33c9a94ddf4bba17e1269e87c1dcf7257e08"} Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.955644 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerDied","Data":"8ab10c09df33d5a580ad2893f4497a7ba5796e1c50928856e44602f3b20c6568"} Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.955655 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerDied","Data":"33b1f8b85b8ab7ad46ce6f250c0fbfd265e172b17ae91c9d1e8b3193786e3da4"} Nov 26 07:16:51 crc kubenswrapper[4940]: I1126 07:16:51.977180 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" podStartSLOduration=2.977161303 podStartE2EDuration="2.977161303s" podCreationTimestamp="2025-11-26 07:16:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:51.971564745 +0000 UTC m=+1313.491706374" watchObservedRunningTime="2025-11-26 07:16:51.977161303 +0000 UTC m=+1313.497302922" Nov 26 07:16:52 crc kubenswrapper[4940]: I1126 07:16:52.560252 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:52 crc kubenswrapper[4940]: I1126 07:16:52.560764 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-log" containerID="cri-o://34cc3b8ae2b6091c706beb0cb19a74c1b0fcf21737cf22107d472b58438c8454" gracePeriod=30 Nov 26 07:16:52 crc kubenswrapper[4940]: I1126 07:16:52.560892 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" 
podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-api" containerID="cri-o://b0e53e491751fb29c142c49d72a4d6c4f53ce3a88478fa258f5ccfac8147f328" gracePeriod=30 Nov 26 07:16:52 crc kubenswrapper[4940]: I1126 07:16:52.965574 4940 generic.go:334] "Generic (PLEG): container finished" podID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerID="34cc3b8ae2b6091c706beb0cb19a74c1b0fcf21737cf22107d472b58438c8454" exitCode=143 Nov 26 07:16:52 crc kubenswrapper[4940]: I1126 07:16:52.965662 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4cfb4e54-3927-4275-930d-4922b57f7c81","Type":"ContainerDied","Data":"34cc3b8ae2b6091c706beb0cb19a74c1b0fcf21737cf22107d472b58438c8454"} Nov 26 07:16:55 crc kubenswrapper[4940]: I1126 07:16:55.994801 4940 generic.go:334] "Generic (PLEG): container finished" podID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerID="b0e53e491751fb29c142c49d72a4d6c4f53ce3a88478fa258f5ccfac8147f328" exitCode=0 Nov 26 07:16:55 crc kubenswrapper[4940]: I1126 07:16:55.994893 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4cfb4e54-3927-4275-930d-4922b57f7c81","Type":"ContainerDied","Data":"b0e53e491751fb29c142c49d72a4d6c4f53ce3a88478fa258f5ccfac8147f328"} Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.184786 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.256560 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cfb4e54-3927-4275-930d-4922b57f7c81-logs\") pod \"4cfb4e54-3927-4275-930d-4922b57f7c81\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.256704 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cdlb\" (UniqueName: \"kubernetes.io/projected/4cfb4e54-3927-4275-930d-4922b57f7c81-kube-api-access-9cdlb\") pod \"4cfb4e54-3927-4275-930d-4922b57f7c81\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.256789 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-combined-ca-bundle\") pod \"4cfb4e54-3927-4275-930d-4922b57f7c81\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.256877 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-config-data\") pod \"4cfb4e54-3927-4275-930d-4922b57f7c81\" (UID: \"4cfb4e54-3927-4275-930d-4922b57f7c81\") " Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.261151 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4cfb4e54-3927-4275-930d-4922b57f7c81-logs" (OuterVolumeSpecName: "logs") pod "4cfb4e54-3927-4275-930d-4922b57f7c81" (UID: "4cfb4e54-3927-4275-930d-4922b57f7c81"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.268472 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cfb4e54-3927-4275-930d-4922b57f7c81-kube-api-access-9cdlb" (OuterVolumeSpecName: "kube-api-access-9cdlb") pod "4cfb4e54-3927-4275-930d-4922b57f7c81" (UID: "4cfb4e54-3927-4275-930d-4922b57f7c81"). InnerVolumeSpecName "kube-api-access-9cdlb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.311092 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4cfb4e54-3927-4275-930d-4922b57f7c81" (UID: "4cfb4e54-3927-4275-930d-4922b57f7c81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.342581 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-config-data" (OuterVolumeSpecName: "config-data") pod "4cfb4e54-3927-4275-930d-4922b57f7c81" (UID: "4cfb4e54-3927-4275-930d-4922b57f7c81"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.359747 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cfb4e54-3927-4275-930d-4922b57f7c81-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.359781 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cdlb\" (UniqueName: \"kubernetes.io/projected/4cfb4e54-3927-4275-930d-4922b57f7c81-kube-api-access-9cdlb\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.359793 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.359802 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4cfb4e54-3927-4275-930d-4922b57f7c81-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.596293 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:56 crc kubenswrapper[4940]: I1126 07:16:56.616426 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.008328 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4cfb4e54-3927-4275-930d-4922b57f7c81","Type":"ContainerDied","Data":"d26dbdf1158592ee4873cbefc9fe43ea412c0c8870112ec0c3c7177b78b51d02"} Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.008393 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.008395 4940 scope.go:117] "RemoveContainer" containerID="b0e53e491751fb29c142c49d72a4d6c4f53ce3a88478fa258f5ccfac8147f328" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.029473 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.041395 4940 scope.go:117] "RemoveContainer" containerID="34cc3b8ae2b6091c706beb0cb19a74c1b0fcf21737cf22107d472b58438c8454" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.042216 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.053566 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.103658 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:57 crc kubenswrapper[4940]: E1126 07:16:57.104246 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-log" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.104279 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-log" Nov 26 07:16:57 crc kubenswrapper[4940]: E1126 07:16:57.104359 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-api" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.104374 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-api" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.104691 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-api" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.104752 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" containerName="nova-api-log" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.109434 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.113023 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.114413 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.114455 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.114674 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.177015 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cfb4e54-3927-4275-930d-4922b57f7c81" path="/var/lib/kubelet/pods/4cfb4e54-3927-4275-930d-4922b57f7c81/volumes" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.183945 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-public-tls-certs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.183995 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.184107 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.184126 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-config-data\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.184157 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9swh5\" (UniqueName: \"kubernetes.io/projected/2e7ab4cb-20db-474c-bcd3-2419c41db958-kube-api-access-9swh5\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.184176 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7ab4cb-20db-474c-bcd3-2419c41db958-logs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.225254 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-44sqk"] Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.226640 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.231243 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.231311 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.232897 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-44sqk"] Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286103 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-public-tls-certs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286149 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286185 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286253 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-config-data\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286276 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2nss\" (UniqueName: \"kubernetes.io/projected/37492950-3851-47fd-ae42-e9009bf4a7bf-kube-api-access-r2nss\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286296 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286315 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-config-data\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286342 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9swh5\" (UniqueName: \"kubernetes.io/projected/2e7ab4cb-20db-474c-bcd3-2419c41db958-kube-api-access-9swh5\") pod \"nova-api-0\" (UID: 
\"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286365 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7ab4cb-20db-474c-bcd3-2419c41db958-logs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.286422 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-scripts\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.287238 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7ab4cb-20db-474c-bcd3-2419c41db958-logs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.291369 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-public-tls-certs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.291733 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-config-data\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.297903 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.302441 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.306445 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9swh5\" (UniqueName: \"kubernetes.io/projected/2e7ab4cb-20db-474c-bcd3-2419c41db958-kube-api-access-9swh5\") pod \"nova-api-0\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.388202 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.388627 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-config-data\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: 
\"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.388673 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2nss\" (UniqueName: \"kubernetes.io/projected/37492950-3851-47fd-ae42-e9009bf4a7bf-kube-api-access-r2nss\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.388807 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-scripts\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.393117 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-config-data\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.393490 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-scripts\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.404869 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.410410 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2nss\" (UniqueName: \"kubernetes.io/projected/37492950-3851-47fd-ae42-e9009bf4a7bf-kube-api-access-r2nss\") pod \"nova-cell1-cell-mapping-44sqk\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.435267 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.543698 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:16:57 crc kubenswrapper[4940]: I1126 07:16:57.900741 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:16:58 crc kubenswrapper[4940]: I1126 07:16:58.025252 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2e7ab4cb-20db-474c-bcd3-2419c41db958","Type":"ContainerStarted","Data":"951729de82f668249affd13fa3010f25080e5ff5b9afd355f83e577a871cc72d"} Nov 26 07:16:58 crc kubenswrapper[4940]: I1126 07:16:58.055200 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-44sqk"] Nov 26 07:16:58 crc kubenswrapper[4940]: W1126 07:16:58.068461 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37492950_3851_47fd_ae42_e9009bf4a7bf.slice/crio-4e55781e258b34449e55ad3510092ee59aef6cc5cc4d6a17277032d71aae086d WatchSource:0}: Error finding container 4e55781e258b34449e55ad3510092ee59aef6cc5cc4d6a17277032d71aae086d: Status 404 returned error can't find the container with id 4e55781e258b34449e55ad3510092ee59aef6cc5cc4d6a17277032d71aae086d Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.048228 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-44sqk" event={"ID":"37492950-3851-47fd-ae42-e9009bf4a7bf","Type":"ContainerStarted","Data":"5ebd02f88b9688b1e58f4c5bef3d10671c9d5b3d57bbdf96a5f704fe945e3bba"} Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.048770 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-44sqk" event={"ID":"37492950-3851-47fd-ae42-e9009bf4a7bf","Type":"ContainerStarted","Data":"4e55781e258b34449e55ad3510092ee59aef6cc5cc4d6a17277032d71aae086d"} Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.051426 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2e7ab4cb-20db-474c-bcd3-2419c41db958","Type":"ContainerStarted","Data":"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902"} Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.051451 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2e7ab4cb-20db-474c-bcd3-2419c41db958","Type":"ContainerStarted","Data":"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70"} Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.078528 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-44sqk" podStartSLOduration=2.078498369 podStartE2EDuration="2.078498369s" podCreationTimestamp="2025-11-26 07:16:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:59.068487511 +0000 UTC m=+1320.588629120" watchObservedRunningTime="2025-11-26 07:16:59.078498369 +0000 UTC m=+1320.598640018" Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.088100 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.088087093 podStartE2EDuration="2.088087093s" podCreationTimestamp="2025-11-26 07:16:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:16:59.088076412 +0000 UTC m=+1320.608218071" watchObservedRunningTime="2025-11-26 07:16:59.088087093 +0000 UTC 
m=+1320.608228712" Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.470197 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.566554 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dd7c4987f-nvvnw"] Nov 26 07:16:59 crc kubenswrapper[4940]: I1126 07:16:59.566826 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" podUID="455e0122-ecd7-4a3a-97be-fb47b8910025" containerName="dnsmasq-dns" containerID="cri-o://e0c636b12aa08795b31b27a1d917b15d8b732238083a8564a42737a92c658225" gracePeriod=10 Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.061918 4940 generic.go:334] "Generic (PLEG): container finished" podID="455e0122-ecd7-4a3a-97be-fb47b8910025" containerID="e0c636b12aa08795b31b27a1d917b15d8b732238083a8564a42737a92c658225" exitCode=0 Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.061989 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" event={"ID":"455e0122-ecd7-4a3a-97be-fb47b8910025","Type":"ContainerDied","Data":"e0c636b12aa08795b31b27a1d917b15d8b732238083a8564a42737a92c658225"} Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.062475 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" event={"ID":"455e0122-ecd7-4a3a-97be-fb47b8910025","Type":"ContainerDied","Data":"9c9fc233627c9214c079faa6a46361b6a620342a1710d9ea80eae869af0f4028"} Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.062490 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c9fc233627c9214c079faa6a46361b6a620342a1710d9ea80eae869af0f4028" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.111011 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.158735 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-swift-storage-0\") pod \"455e0122-ecd7-4a3a-97be-fb47b8910025\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.158807 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-config\") pod \"455e0122-ecd7-4a3a-97be-fb47b8910025\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.158894 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svz7p\" (UniqueName: \"kubernetes.io/projected/455e0122-ecd7-4a3a-97be-fb47b8910025-kube-api-access-svz7p\") pod \"455e0122-ecd7-4a3a-97be-fb47b8910025\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.158962 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-nb\") pod \"455e0122-ecd7-4a3a-97be-fb47b8910025\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.159017 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-svc\") pod \"455e0122-ecd7-4a3a-97be-fb47b8910025\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.159159 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-sb\") pod \"455e0122-ecd7-4a3a-97be-fb47b8910025\" (UID: \"455e0122-ecd7-4a3a-97be-fb47b8910025\") " Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.188343 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/455e0122-ecd7-4a3a-97be-fb47b8910025-kube-api-access-svz7p" (OuterVolumeSpecName: "kube-api-access-svz7p") pod "455e0122-ecd7-4a3a-97be-fb47b8910025" (UID: "455e0122-ecd7-4a3a-97be-fb47b8910025"). InnerVolumeSpecName "kube-api-access-svz7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.230799 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "455e0122-ecd7-4a3a-97be-fb47b8910025" (UID: "455e0122-ecd7-4a3a-97be-fb47b8910025"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.261450 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svz7p\" (UniqueName: \"kubernetes.io/projected/455e0122-ecd7-4a3a-97be-fb47b8910025-kube-api-access-svz7p\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.261478 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.266815 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-config" (OuterVolumeSpecName: "config") pod "455e0122-ecd7-4a3a-97be-fb47b8910025" (UID: "455e0122-ecd7-4a3a-97be-fb47b8910025"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.267861 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "455e0122-ecd7-4a3a-97be-fb47b8910025" (UID: "455e0122-ecd7-4a3a-97be-fb47b8910025"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.278923 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "455e0122-ecd7-4a3a-97be-fb47b8910025" (UID: "455e0122-ecd7-4a3a-97be-fb47b8910025"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.283524 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "455e0122-ecd7-4a3a-97be-fb47b8910025" (UID: "455e0122-ecd7-4a3a-97be-fb47b8910025"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.363486 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.363723 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.363800 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:00 crc kubenswrapper[4940]: I1126 07:17:00.363869 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/455e0122-ecd7-4a3a-97be-fb47b8910025-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.077955 4940 generic.go:334] "Generic (PLEG): container finished" podID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerID="e2b67954ae2a8283b16c7c53487cd1aaef4a4c26e1d91c7d7291bd5a523b56c8" exitCode=0 Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.078077 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerDied","Data":"e2b67954ae2a8283b16c7c53487cd1aaef4a4c26e1d91c7d7291bd5a523b56c8"} Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.078267 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dd7c4987f-nvvnw" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.134107 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dd7c4987f-nvvnw"] Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.150495 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5dd7c4987f-nvvnw"] Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.210136 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="455e0122-ecd7-4a3a-97be-fb47b8910025" path="/var/lib/kubelet/pods/455e0122-ecd7-4a3a-97be-fb47b8910025/volumes" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.435799 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.484883 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-log-httpd\") pod \"b356de7e-d5db-470d-863d-21a4dae15a4c\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.485323 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-combined-ca-bundle\") pod \"b356de7e-d5db-470d-863d-21a4dae15a4c\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.486156 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-run-httpd\") pod \"b356de7e-d5db-470d-863d-21a4dae15a4c\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.486354 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-ceilometer-tls-certs\") pod \"b356de7e-d5db-470d-863d-21a4dae15a4c\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.486516 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-config-data\") pod \"b356de7e-d5db-470d-863d-21a4dae15a4c\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.486623 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6wn8\" (UniqueName: \"kubernetes.io/projected/b356de7e-d5db-470d-863d-21a4dae15a4c-kube-api-access-j6wn8\") pod \"b356de7e-d5db-470d-863d-21a4dae15a4c\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.486799 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-scripts\") pod \"b356de7e-d5db-470d-863d-21a4dae15a4c\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.486905 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-sg-core-conf-yaml\") pod \"b356de7e-d5db-470d-863d-21a4dae15a4c\" (UID: \"b356de7e-d5db-470d-863d-21a4dae15a4c\") " Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.485487 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b356de7e-d5db-470d-863d-21a4dae15a4c" (UID: "b356de7e-d5db-470d-863d-21a4dae15a4c"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.499312 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b356de7e-d5db-470d-863d-21a4dae15a4c" (UID: "b356de7e-d5db-470d-863d-21a4dae15a4c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.501124 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b356de7e-d5db-470d-863d-21a4dae15a4c-kube-api-access-j6wn8" (OuterVolumeSpecName: "kube-api-access-j6wn8") pod "b356de7e-d5db-470d-863d-21a4dae15a4c" (UID: "b356de7e-d5db-470d-863d-21a4dae15a4c"). InnerVolumeSpecName "kube-api-access-j6wn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.514021 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-scripts" (OuterVolumeSpecName: "scripts") pod "b356de7e-d5db-470d-863d-21a4dae15a4c" (UID: "b356de7e-d5db-470d-863d-21a4dae15a4c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.588987 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b356de7e-d5db-470d-863d-21a4dae15a4c" (UID: "b356de7e-d5db-470d-863d-21a4dae15a4c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.589330 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6wn8\" (UniqueName: \"kubernetes.io/projected/b356de7e-d5db-470d-863d-21a4dae15a4c-kube-api-access-j6wn8\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.589405 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.589462 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.589521 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.589595 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b356de7e-d5db-470d-863d-21a4dae15a4c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.612664 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "b356de7e-d5db-470d-863d-21a4dae15a4c" (UID: "b356de7e-d5db-470d-863d-21a4dae15a4c"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.623857 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b356de7e-d5db-470d-863d-21a4dae15a4c" (UID: "b356de7e-d5db-470d-863d-21a4dae15a4c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.659729 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-config-data" (OuterVolumeSpecName: "config-data") pod "b356de7e-d5db-470d-863d-21a4dae15a4c" (UID: "b356de7e-d5db-470d-863d-21a4dae15a4c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.691618 4940 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.691850 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:01 crc kubenswrapper[4940]: I1126 07:17:01.691923 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b356de7e-d5db-470d-863d-21a4dae15a4c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.089708 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b356de7e-d5db-470d-863d-21a4dae15a4c","Type":"ContainerDied","Data":"44886a4c4429c3d8471d252a5a606731a204580ecc3fd957428bbb49a4472606"} Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.089769 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.089778 4940 scope.go:117] "RemoveContainer" containerID="437d7372e0e4cf0691257be8373b33c9a94ddf4bba17e1269e87c1dcf7257e08" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.125558 4940 scope.go:117] "RemoveContainer" containerID="8ab10c09df33d5a580ad2893f4497a7ba5796e1c50928856e44602f3b20c6568" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.126318 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.134416 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.148501 4940 scope.go:117] "RemoveContainer" containerID="e2b67954ae2a8283b16c7c53487cd1aaef4a4c26e1d91c7d7291bd5a523b56c8" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.157707 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:17:02 crc kubenswrapper[4940]: E1126 07:17:02.158233 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="455e0122-ecd7-4a3a-97be-fb47b8910025" containerName="dnsmasq-dns" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158257 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="455e0122-ecd7-4a3a-97be-fb47b8910025" containerName="dnsmasq-dns" Nov 26 07:17:02 crc kubenswrapper[4940]: E1126 07:17:02.158278 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="sg-core" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158286 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="sg-core" Nov 26 07:17:02 crc kubenswrapper[4940]: E1126 07:17:02.158306 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="ceilometer-central-agent" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158316 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="ceilometer-central-agent" Nov 26 07:17:02 crc kubenswrapper[4940]: E1126 07:17:02.158330 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="ceilometer-notification-agent" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158338 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="ceilometer-notification-agent" Nov 26 07:17:02 crc kubenswrapper[4940]: E1126 07:17:02.158357 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="455e0122-ecd7-4a3a-97be-fb47b8910025" containerName="init" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158365 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="455e0122-ecd7-4a3a-97be-fb47b8910025" containerName="init" Nov 26 07:17:02 crc kubenswrapper[4940]: E1126 07:17:02.158393 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="proxy-httpd" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158401 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="proxy-httpd" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158632 4940 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="proxy-httpd" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158662 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="ceilometer-notification-agent" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158673 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="455e0122-ecd7-4a3a-97be-fb47b8910025" containerName="dnsmasq-dns" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158689 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="ceilometer-central-agent" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.158710 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" containerName="sg-core" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.162574 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.168175 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.168514 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.168596 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.168993 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.192363 4940 scope.go:117] "RemoveContainer" containerID="33b1f8b85b8ab7ad46ce6f250c0fbfd265e172b17ae91c9d1e8b3193786e3da4" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.200693 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-run-httpd\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.200727 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-scripts\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.200779 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.200808 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.200879 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-6q4jw\" (UniqueName: \"kubernetes.io/projected/5ac14fd0-8273-436b-89b8-a1478aaa226d-kube-api-access-6q4jw\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.200917 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-log-httpd\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.200962 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.201151 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-config-data\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.302417 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6q4jw\" (UniqueName: \"kubernetes.io/projected/5ac14fd0-8273-436b-89b8-a1478aaa226d-kube-api-access-6q4jw\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.302486 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-log-httpd\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.302539 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.302575 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-config-data\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.302601 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-run-httpd\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.302620 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-scripts\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.302657 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.302680 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.303827 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-log-httpd\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.304342 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-run-httpd\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.307855 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.309094 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.309460 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-scripts\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.309420 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-config-data\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.315433 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.323190 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6q4jw\" (UniqueName: \"kubernetes.io/projected/5ac14fd0-8273-436b-89b8-a1478aaa226d-kube-api-access-6q4jw\") pod \"ceilometer-0\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.481585 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:17:02 crc kubenswrapper[4940]: I1126 07:17:02.974172 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:17:02 crc kubenswrapper[4940]: W1126 07:17:02.978865 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ac14fd0_8273_436b_89b8_a1478aaa226d.slice/crio-4d764f9bbe565c918e44127af2ec29d02577a905188f2cf547aa6a2e8d122bce WatchSource:0}: Error finding container 4d764f9bbe565c918e44127af2ec29d02577a905188f2cf547aa6a2e8d122bce: Status 404 returned error can't find the container with id 4d764f9bbe565c918e44127af2ec29d02577a905188f2cf547aa6a2e8d122bce Nov 26 07:17:03 crc kubenswrapper[4940]: I1126 07:17:03.100525 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerStarted","Data":"4d764f9bbe565c918e44127af2ec29d02577a905188f2cf547aa6a2e8d122bce"} Nov 26 07:17:03 crc kubenswrapper[4940]: I1126 07:17:03.178124 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b356de7e-d5db-470d-863d-21a4dae15a4c" path="/var/lib/kubelet/pods/b356de7e-d5db-470d-863d-21a4dae15a4c/volumes" Nov 26 07:17:04 crc kubenswrapper[4940]: I1126 07:17:04.114856 4940 generic.go:334] "Generic (PLEG): container finished" podID="37492950-3851-47fd-ae42-e9009bf4a7bf" containerID="5ebd02f88b9688b1e58f4c5bef3d10671c9d5b3d57bbdf96a5f704fe945e3bba" exitCode=0 Nov 26 07:17:04 crc kubenswrapper[4940]: I1126 07:17:04.114887 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-44sqk" event={"ID":"37492950-3851-47fd-ae42-e9009bf4a7bf","Type":"ContainerDied","Data":"5ebd02f88b9688b1e58f4c5bef3d10671c9d5b3d57bbdf96a5f704fe945e3bba"} Nov 26 07:17:04 crc kubenswrapper[4940]: I1126 07:17:04.118676 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerStarted","Data":"aa12214a40e17b396195f7c3910988635f60c08b39041e13aa87c7d6cc3c84be"} Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.130704 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerStarted","Data":"5fcae63004fc694c125bd0895a86667cfe1b52745e785eba586d24f5dff5b67d"} Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.520895 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.568749 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-combined-ca-bundle\") pod \"37492950-3851-47fd-ae42-e9009bf4a7bf\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.568850 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-config-data\") pod \"37492950-3851-47fd-ae42-e9009bf4a7bf\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.568890 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-scripts\") pod \"37492950-3851-47fd-ae42-e9009bf4a7bf\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.569069 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2nss\" (UniqueName: \"kubernetes.io/projected/37492950-3851-47fd-ae42-e9009bf4a7bf-kube-api-access-r2nss\") pod \"37492950-3851-47fd-ae42-e9009bf4a7bf\" (UID: \"37492950-3851-47fd-ae42-e9009bf4a7bf\") " Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.575212 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37492950-3851-47fd-ae42-e9009bf4a7bf-kube-api-access-r2nss" (OuterVolumeSpecName: "kube-api-access-r2nss") pod "37492950-3851-47fd-ae42-e9009bf4a7bf" (UID: "37492950-3851-47fd-ae42-e9009bf4a7bf"). InnerVolumeSpecName "kube-api-access-r2nss". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.597059 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-scripts" (OuterVolumeSpecName: "scripts") pod "37492950-3851-47fd-ae42-e9009bf4a7bf" (UID: "37492950-3851-47fd-ae42-e9009bf4a7bf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.617730 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-config-data" (OuterVolumeSpecName: "config-data") pod "37492950-3851-47fd-ae42-e9009bf4a7bf" (UID: "37492950-3851-47fd-ae42-e9009bf4a7bf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.618324 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37492950-3851-47fd-ae42-e9009bf4a7bf" (UID: "37492950-3851-47fd-ae42-e9009bf4a7bf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.671069 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.671287 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2nss\" (UniqueName: \"kubernetes.io/projected/37492950-3851-47fd-ae42-e9009bf4a7bf-kube-api-access-r2nss\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.671300 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:05 crc kubenswrapper[4940]: I1126 07:17:05.671310 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37492950-3851-47fd-ae42-e9009bf4a7bf-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.142062 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-44sqk" event={"ID":"37492950-3851-47fd-ae42-e9009bf4a7bf","Type":"ContainerDied","Data":"4e55781e258b34449e55ad3510092ee59aef6cc5cc4d6a17277032d71aae086d"} Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.142110 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e55781e258b34449e55ad3510092ee59aef6cc5cc4d6a17277032d71aae086d" Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.142120 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-44sqk" Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.144533 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerStarted","Data":"54bb0d6665161c60a8df29f40f934d0511950d32bca2d3f2401e1f8db929d7db"} Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.326799 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.327205 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="2fbd518d-ce50-4811-8dc1-74d12a09eb8c" containerName="nova-scheduler-scheduler" containerID="cri-o://55a9ba5541c0f40f9c714ac19a220739b7c722566d746d2644ec3d3b672ef513" gracePeriod=30 Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.345092 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.345425 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerName="nova-api-log" containerID="cri-o://f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70" gracePeriod=30 Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.345594 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerName="nova-api-api" containerID="cri-o://afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902" gracePeriod=30 Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.362050 4940 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.362311 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-log" containerID="cri-o://c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7" gracePeriod=30 Nov 26 07:17:06 crc kubenswrapper[4940]: I1126 07:17:06.362435 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-metadata" containerID="cri-o://20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf" gracePeriod=30 Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.011378 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.094731 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-internal-tls-certs\") pod \"2e7ab4cb-20db-474c-bcd3-2419c41db958\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.094797 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-public-tls-certs\") pod \"2e7ab4cb-20db-474c-bcd3-2419c41db958\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.094855 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9swh5\" (UniqueName: \"kubernetes.io/projected/2e7ab4cb-20db-474c-bcd3-2419c41db958-kube-api-access-9swh5\") pod \"2e7ab4cb-20db-474c-bcd3-2419c41db958\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.094890 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-config-data\") pod \"2e7ab4cb-20db-474c-bcd3-2419c41db958\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.094922 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7ab4cb-20db-474c-bcd3-2419c41db958-logs\") pod \"2e7ab4cb-20db-474c-bcd3-2419c41db958\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.094951 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-combined-ca-bundle\") pod \"2e7ab4cb-20db-474c-bcd3-2419c41db958\" (UID: \"2e7ab4cb-20db-474c-bcd3-2419c41db958\") " Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.096076 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e7ab4cb-20db-474c-bcd3-2419c41db958-logs" (OuterVolumeSpecName: "logs") pod "2e7ab4cb-20db-474c-bcd3-2419c41db958" (UID: "2e7ab4cb-20db-474c-bcd3-2419c41db958"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.102175 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e7ab4cb-20db-474c-bcd3-2419c41db958-kube-api-access-9swh5" (OuterVolumeSpecName: "kube-api-access-9swh5") pod "2e7ab4cb-20db-474c-bcd3-2419c41db958" (UID: "2e7ab4cb-20db-474c-bcd3-2419c41db958"). InnerVolumeSpecName "kube-api-access-9swh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.127833 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2e7ab4cb-20db-474c-bcd3-2419c41db958" (UID: "2e7ab4cb-20db-474c-bcd3-2419c41db958"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.140530 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-config-data" (OuterVolumeSpecName: "config-data") pod "2e7ab4cb-20db-474c-bcd3-2419c41db958" (UID: "2e7ab4cb-20db-474c-bcd3-2419c41db958"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.145512 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "2e7ab4cb-20db-474c-bcd3-2419c41db958" (UID: "2e7ab4cb-20db-474c-bcd3-2419c41db958"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.153238 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2e7ab4cb-20db-474c-bcd3-2419c41db958" (UID: "2e7ab4cb-20db-474c-bcd3-2419c41db958"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.158207 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerStarted","Data":"9a54934efee8d36f8fe7bb895361ca1c8eea974267d635a96b512ed26e1494b7"} Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.159233 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.162205 4940 generic.go:334] "Generic (PLEG): container finished" podID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerID="afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902" exitCode=0 Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.162237 4940 generic.go:334] "Generic (PLEG): container finished" podID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerID="f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70" exitCode=143 Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.162311 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2e7ab4cb-20db-474c-bcd3-2419c41db958","Type":"ContainerDied","Data":"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902"} Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.162338 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2e7ab4cb-20db-474c-bcd3-2419c41db958","Type":"ContainerDied","Data":"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70"} Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.162342 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.162348 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2e7ab4cb-20db-474c-bcd3-2419c41db958","Type":"ContainerDied","Data":"951729de82f668249affd13fa3010f25080e5ff5b9afd355f83e577a871cc72d"} Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.162385 4940 scope.go:117] "RemoveContainer" containerID="afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.164376 4940 generic.go:334] "Generic (PLEG): container finished" podID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerID="c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7" exitCode=143 Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.164398 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2752e9c9-51d4-4e2c-9f07-c87e36d243ee","Type":"ContainerDied","Data":"c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7"} Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.192796 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9580327579999999 podStartE2EDuration="5.192571184s" podCreationTimestamp="2025-11-26 07:17:02 +0000 UTC" firstStartedPulling="2025-11-26 07:17:02.981365342 +0000 UTC m=+1324.501506971" lastFinishedPulling="2025-11-26 07:17:06.215903778 +0000 UTC m=+1327.736045397" observedRunningTime="2025-11-26 07:17:07.178921331 +0000 UTC m=+1328.699062950" watchObservedRunningTime="2025-11-26 07:17:07.192571184 +0000 UTC m=+1328.712712813" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.200014 4940 scope.go:117] "RemoveContainer" 
containerID="f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.202896 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.202925 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.202937 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9swh5\" (UniqueName: \"kubernetes.io/projected/2e7ab4cb-20db-474c-bcd3-2419c41db958-kube-api-access-9swh5\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.202951 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.202963 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e7ab4cb-20db-474c-bcd3-2419c41db958-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.202971 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e7ab4cb-20db-474c-bcd3-2419c41db958-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.229272 4940 scope.go:117] "RemoveContainer" containerID="afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902" Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.231392 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902\": container with ID starting with afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902 not found: ID does not exist" containerID="afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.231441 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902"} err="failed to get container status \"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902\": rpc error: code = NotFound desc = could not find container \"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902\": container with ID starting with afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902 not found: ID does not exist" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.231469 4940 scope.go:117] "RemoveContainer" containerID="f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.232222 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.232392 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70\": container with ID starting with 
f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70 not found: ID does not exist" containerID="f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.232429 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70"} err="failed to get container status \"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70\": rpc error: code = NotFound desc = could not find container \"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70\": container with ID starting with f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70 not found: ID does not exist" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.232449 4940 scope.go:117] "RemoveContainer" containerID="afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.232850 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902"} err="failed to get container status \"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902\": rpc error: code = NotFound desc = could not find container \"afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902\": container with ID starting with afcdf14b880e7390d457c50592e4dbfae86a713d332cc34e2c593771da762902 not found: ID does not exist" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.232891 4940 scope.go:117] "RemoveContainer" containerID="f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.233168 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70"} err="failed to get container status \"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70\": rpc error: code = NotFound desc = could not find container \"f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70\": container with ID starting with f18fc35937301db6d148d5aa0479d1793e41e84264dbb37642935013c19a3d70 not found: ID does not exist" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.243554 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.256185 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.256628 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37492950-3851-47fd-ae42-e9009bf4a7bf" containerName="nova-manage" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.256653 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="37492950-3851-47fd-ae42-e9009bf4a7bf" containerName="nova-manage" Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.256672 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerName="nova-api-api" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.256679 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerName="nova-api-api" Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.256695 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" 
containerName="nova-api-log" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.256701 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerName="nova-api-log" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.256918 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerName="nova-api-log" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.256950 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="37492950-3851-47fd-ae42-e9009bf4a7bf" containerName="nova-manage" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.256984 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" containerName="nova-api-api" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.258014 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.259984 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.260258 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.260408 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.266241 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.305782 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-config-data\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.305933 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.306134 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.306270 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-public-tls-certs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.306318 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/310cee15-c1c5-4db3-8900-ca3107ba130d-logs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.308111 4940 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h75zl\" (UniqueName: \"kubernetes.io/projected/310cee15-c1c5-4db3-8900-ca3107ba130d-kube-api-access-h75zl\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.380542 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="55a9ba5541c0f40f9c714ac19a220739b7c722566d746d2644ec3d3b672ef513" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.382187 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="55a9ba5541c0f40f9c714ac19a220739b7c722566d746d2644ec3d3b672ef513" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.383339 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="55a9ba5541c0f40f9c714ac19a220739b7c722566d746d2644ec3d3b672ef513" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:17:07 crc kubenswrapper[4940]: E1126 07:17:07.383370 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="2fbd518d-ce50-4811-8dc1-74d12a09eb8c" containerName="nova-scheduler-scheduler" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.409769 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.409875 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-public-tls-certs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.409917 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/310cee15-c1c5-4db3-8900-ca3107ba130d-logs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.409953 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h75zl\" (UniqueName: \"kubernetes.io/projected/310cee15-c1c5-4db3-8900-ca3107ba130d-kube-api-access-h75zl\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.409997 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-config-data\") pod \"nova-api-0\" 
(UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.410060 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.411164 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/310cee15-c1c5-4db3-8900-ca3107ba130d-logs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.414106 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.417590 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.419537 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-config-data\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.422800 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-public-tls-certs\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.434778 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h75zl\" (UniqueName: \"kubernetes.io/projected/310cee15-c1c5-4db3-8900-ca3107ba130d-kube-api-access-h75zl\") pod \"nova-api-0\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " pod="openstack/nova-api-0" Nov 26 07:17:07 crc kubenswrapper[4940]: I1126 07:17:07.575847 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:17:08 crc kubenswrapper[4940]: I1126 07:17:08.037581 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:17:08 crc kubenswrapper[4940]: I1126 07:17:08.186611 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"310cee15-c1c5-4db3-8900-ca3107ba130d","Type":"ContainerStarted","Data":"be27ab6856f9259db0d079afe6e4d935a9c48401811e84fa8b80fc98686c9af4"} Nov 26 07:17:09 crc kubenswrapper[4940]: I1126 07:17:09.185361 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e7ab4cb-20db-474c-bcd3-2419c41db958" path="/var/lib/kubelet/pods/2e7ab4cb-20db-474c-bcd3-2419c41db958/volumes" Nov 26 07:17:09 crc kubenswrapper[4940]: I1126 07:17:09.206054 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"310cee15-c1c5-4db3-8900-ca3107ba130d","Type":"ContainerStarted","Data":"62fc80217f3946465f0f5d0d83b59bf3c3a8c7cea1b4b8754c8843a8b67e8e47"} Nov 26 07:17:09 crc kubenswrapper[4940]: I1126 07:17:09.206102 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"310cee15-c1c5-4db3-8900-ca3107ba130d","Type":"ContainerStarted","Data":"74f237af1603b7679312e5f0ffe714ea2bf8e1fd4fcf073910b8ab40cc83aaff"} Nov 26 07:17:09 crc kubenswrapper[4940]: I1126 07:17:09.236867 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.23684918 podStartE2EDuration="2.23684918s" podCreationTimestamp="2025-11-26 07:17:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:17:09.232454501 +0000 UTC m=+1330.752596200" watchObservedRunningTime="2025-11-26 07:17:09.23684918 +0000 UTC m=+1330.756990799" Nov 26 07:17:09 crc kubenswrapper[4940]: I1126 07:17:09.511512 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": read tcp 10.217.0.2:60572->10.217.0.191:8775: read: connection reset by peer" Nov 26 07:17:09 crc kubenswrapper[4940]: I1126 07:17:09.511523 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": read tcp 10.217.0.2:60582->10.217.0.191:8775: read: connection reset by peer" Nov 26 07:17:09 crc kubenswrapper[4940]: I1126 07:17:09.990935 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.053123 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-combined-ca-bundle\") pod \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.053223 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-config-data\") pod \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.053258 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w48qb\" (UniqueName: \"kubernetes.io/projected/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-kube-api-access-w48qb\") pod \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.053380 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-logs\") pod \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.053396 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-nova-metadata-tls-certs\") pod \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\" (UID: \"2752e9c9-51d4-4e2c-9f07-c87e36d243ee\") " Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.053747 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-logs" (OuterVolumeSpecName: "logs") pod "2752e9c9-51d4-4e2c-9f07-c87e36d243ee" (UID: "2752e9c9-51d4-4e2c-9f07-c87e36d243ee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.066503 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-kube-api-access-w48qb" (OuterVolumeSpecName: "kube-api-access-w48qb") pod "2752e9c9-51d4-4e2c-9f07-c87e36d243ee" (UID: "2752e9c9-51d4-4e2c-9f07-c87e36d243ee"). InnerVolumeSpecName "kube-api-access-w48qb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.087220 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-config-data" (OuterVolumeSpecName: "config-data") pod "2752e9c9-51d4-4e2c-9f07-c87e36d243ee" (UID: "2752e9c9-51d4-4e2c-9f07-c87e36d243ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.091926 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2752e9c9-51d4-4e2c-9f07-c87e36d243ee" (UID: "2752e9c9-51d4-4e2c-9f07-c87e36d243ee"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.121257 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "2752e9c9-51d4-4e2c-9f07-c87e36d243ee" (UID: "2752e9c9-51d4-4e2c-9f07-c87e36d243ee"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.155593 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.155632 4940 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.155646 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.155655 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.155664 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w48qb\" (UniqueName: \"kubernetes.io/projected/2752e9c9-51d4-4e2c-9f07-c87e36d243ee-kube-api-access-w48qb\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.218784 4940 generic.go:334] "Generic (PLEG): container finished" podID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerID="20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf" exitCode=0 Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.218837 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2752e9c9-51d4-4e2c-9f07-c87e36d243ee","Type":"ContainerDied","Data":"20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf"} Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.218886 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2752e9c9-51d4-4e2c-9f07-c87e36d243ee","Type":"ContainerDied","Data":"f4235c870573402e91de3990cae0827d3b62e4889b16b999936cd3f2f2f49080"} Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.218905 4940 scope.go:117] "RemoveContainer" containerID="20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.218854 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.250668 4940 scope.go:117] "RemoveContainer" containerID="c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.257520 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.268505 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.277508 4940 scope.go:117] "RemoveContainer" containerID="20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf" Nov 26 07:17:10 crc kubenswrapper[4940]: E1126 07:17:10.277951 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf\": container with ID starting with 20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf not found: ID does not exist" containerID="20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.277979 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf"} err="failed to get container status \"20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf\": rpc error: code = NotFound desc = could not find container \"20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf\": container with ID starting with 20daa2d563aca3bd7eb31439b9af64145eebadec6451d61aceb39f70a85112bf not found: ID does not exist" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.278021 4940 scope.go:117] "RemoveContainer" containerID="c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7" Nov 26 07:17:10 crc kubenswrapper[4940]: E1126 07:17:10.280767 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7\": container with ID starting with c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7 not found: ID does not exist" containerID="c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.280820 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7"} err="failed to get container status \"c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7\": rpc error: code = NotFound desc = could not find container \"c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7\": container with ID starting with c4586e05e518ddd72de673e807fe82b11bf3f99afaab629bc268722faaac42f7 not found: ID does not exist" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.282142 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:10 crc kubenswrapper[4940]: E1126 07:17:10.282825 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-log" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.282852 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" 
containerName="nova-metadata-log" Nov 26 07:17:10 crc kubenswrapper[4940]: E1126 07:17:10.282873 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-metadata" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.282884 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-metadata" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.283147 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-metadata" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.283182 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" containerName="nova-metadata-log" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.284452 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.289848 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.290006 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.296572 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.360077 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.360157 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pnls\" (UniqueName: \"kubernetes.io/projected/63d173d6-a7cc-42f3-806d-50b9c8f8b189-kube-api-access-5pnls\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.360322 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63d173d6-a7cc-42f3-806d-50b9c8f8b189-logs\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.360689 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-config-data\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.360897 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.462707 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.462797 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pnls\" (UniqueName: \"kubernetes.io/projected/63d173d6-a7cc-42f3-806d-50b9c8f8b189-kube-api-access-5pnls\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.462817 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.462836 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63d173d6-a7cc-42f3-806d-50b9c8f8b189-logs\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.462942 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-config-data\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.463481 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63d173d6-a7cc-42f3-806d-50b9c8f8b189-logs\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.467175 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.481297 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-config-data\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.481486 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 07:17:10.490981 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pnls\" (UniqueName: \"kubernetes.io/projected/63d173d6-a7cc-42f3-806d-50b9c8f8b189-kube-api-access-5pnls\") pod \"nova-metadata-0\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " pod="openstack/nova-metadata-0" Nov 26 07:17:10 crc kubenswrapper[4940]: I1126 
07:17:10.607003 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:17:11 crc kubenswrapper[4940]: W1126 07:17:11.112002 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63d173d6_a7cc_42f3_806d_50b9c8f8b189.slice/crio-be89b3086a71199cabd18e4100f1de14477eb5759b5d58a8a68f0e56d56285ee WatchSource:0}: Error finding container be89b3086a71199cabd18e4100f1de14477eb5759b5d58a8a68f0e56d56285ee: Status 404 returned error can't find the container with id be89b3086a71199cabd18e4100f1de14477eb5759b5d58a8a68f0e56d56285ee Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.114793 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.178167 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2752e9c9-51d4-4e2c-9f07-c87e36d243ee" path="/var/lib/kubelet/pods/2752e9c9-51d4-4e2c-9f07-c87e36d243ee/volumes" Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.228115 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"63d173d6-a7cc-42f3-806d-50b9c8f8b189","Type":"ContainerStarted","Data":"be89b3086a71199cabd18e4100f1de14477eb5759b5d58a8a68f0e56d56285ee"} Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.236487 4940 generic.go:334] "Generic (PLEG): container finished" podID="2fbd518d-ce50-4811-8dc1-74d12a09eb8c" containerID="55a9ba5541c0f40f9c714ac19a220739b7c722566d746d2644ec3d3b672ef513" exitCode=0 Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.236549 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fbd518d-ce50-4811-8dc1-74d12a09eb8c","Type":"ContainerDied","Data":"55a9ba5541c0f40f9c714ac19a220739b7c722566d746d2644ec3d3b672ef513"} Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.481441 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.580904 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwkqf\" (UniqueName: \"kubernetes.io/projected/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-kube-api-access-nwkqf\") pod \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.581022 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-combined-ca-bundle\") pod \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.581131 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-config-data\") pod \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\" (UID: \"2fbd518d-ce50-4811-8dc1-74d12a09eb8c\") " Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.586901 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-kube-api-access-nwkqf" (OuterVolumeSpecName: "kube-api-access-nwkqf") pod "2fbd518d-ce50-4811-8dc1-74d12a09eb8c" (UID: "2fbd518d-ce50-4811-8dc1-74d12a09eb8c"). 
InnerVolumeSpecName "kube-api-access-nwkqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.608843 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-config-data" (OuterVolumeSpecName: "config-data") pod "2fbd518d-ce50-4811-8dc1-74d12a09eb8c" (UID: "2fbd518d-ce50-4811-8dc1-74d12a09eb8c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.614331 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2fbd518d-ce50-4811-8dc1-74d12a09eb8c" (UID: "2fbd518d-ce50-4811-8dc1-74d12a09eb8c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.682931 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwkqf\" (UniqueName: \"kubernetes.io/projected/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-kube-api-access-nwkqf\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.682976 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:11 crc kubenswrapper[4940]: I1126 07:17:11.682987 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fbd518d-ce50-4811-8dc1-74d12a09eb8c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.248508 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"63d173d6-a7cc-42f3-806d-50b9c8f8b189","Type":"ContainerStarted","Data":"3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1"} Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.248553 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"63d173d6-a7cc-42f3-806d-50b9c8f8b189","Type":"ContainerStarted","Data":"31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076"} Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.250764 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fbd518d-ce50-4811-8dc1-74d12a09eb8c","Type":"ContainerDied","Data":"e8c4bb20c71fd4dd8f9333094478285cf8decb6846cf6cb748563bda36c0160a"} Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.250805 4940 scope.go:117] "RemoveContainer" containerID="55a9ba5541c0f40f9c714ac19a220739b7c722566d746d2644ec3d3b672ef513" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.250905 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.277374 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.277352943 podStartE2EDuration="2.277352943s" podCreationTimestamp="2025-11-26 07:17:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:17:12.27190506 +0000 UTC m=+1333.792046679" watchObservedRunningTime="2025-11-26 07:17:12.277352943 +0000 UTC m=+1333.797494572" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.300553 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.317350 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.332081 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:17:12 crc kubenswrapper[4940]: E1126 07:17:12.332586 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fbd518d-ce50-4811-8dc1-74d12a09eb8c" containerName="nova-scheduler-scheduler" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.332609 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fbd518d-ce50-4811-8dc1-74d12a09eb8c" containerName="nova-scheduler-scheduler" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.332847 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fbd518d-ce50-4811-8dc1-74d12a09eb8c" containerName="nova-scheduler-scheduler" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.333646 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.335937 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.353801 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.394650 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.394922 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-config-data\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.395031 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g2fk\" (UniqueName: \"kubernetes.io/projected/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-kube-api-access-7g2fk\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.495938 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.496073 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-config-data\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.496105 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g2fk\" (UniqueName: \"kubernetes.io/projected/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-kube-api-access-7g2fk\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.499691 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-config-data\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.500061 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.514785 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g2fk\" (UniqueName: 
\"kubernetes.io/projected/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-kube-api-access-7g2fk\") pod \"nova-scheduler-0\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " pod="openstack/nova-scheduler-0" Nov 26 07:17:12 crc kubenswrapper[4940]: I1126 07:17:12.652857 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:17:13 crc kubenswrapper[4940]: I1126 07:17:13.087890 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:17:13 crc kubenswrapper[4940]: W1126 07:17:13.089277 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a6bb8be_4f0f_4e55_ad79_ad0e3a35168d.slice/crio-3d68a4704d1d1d84d4a553f17d4619e94408f53609aec09a490bc06ed5381141 WatchSource:0}: Error finding container 3d68a4704d1d1d84d4a553f17d4619e94408f53609aec09a490bc06ed5381141: Status 404 returned error can't find the container with id 3d68a4704d1d1d84d4a553f17d4619e94408f53609aec09a490bc06ed5381141 Nov 26 07:17:13 crc kubenswrapper[4940]: I1126 07:17:13.181882 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fbd518d-ce50-4811-8dc1-74d12a09eb8c" path="/var/lib/kubelet/pods/2fbd518d-ce50-4811-8dc1-74d12a09eb8c/volumes" Nov 26 07:17:13 crc kubenswrapper[4940]: I1126 07:17:13.263812 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d","Type":"ContainerStarted","Data":"3d68a4704d1d1d84d4a553f17d4619e94408f53609aec09a490bc06ed5381141"} Nov 26 07:17:14 crc kubenswrapper[4940]: I1126 07:17:14.296338 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d","Type":"ContainerStarted","Data":"be9a79f6e9b952d84bd13f052112cb7a980f7370fa0b6edda5ce2ca0596c774c"} Nov 26 07:17:14 crc kubenswrapper[4940]: I1126 07:17:14.318424 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.318402437 podStartE2EDuration="2.318402437s" podCreationTimestamp="2025-11-26 07:17:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:17:14.313722629 +0000 UTC m=+1335.833864248" watchObservedRunningTime="2025-11-26 07:17:14.318402437 +0000 UTC m=+1335.838544056" Nov 26 07:17:15 crc kubenswrapper[4940]: I1126 07:17:15.607617 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:17:15 crc kubenswrapper[4940]: I1126 07:17:15.607928 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:17:17 crc kubenswrapper[4940]: I1126 07:17:17.576242 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:17:17 crc kubenswrapper[4940]: I1126 07:17:17.576649 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:17:17 crc kubenswrapper[4940]: I1126 07:17:17.653759 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 07:17:18 crc kubenswrapper[4940]: I1126 07:17:18.592263 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-api" probeResult="failure" output="Get 
\"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:17:18 crc kubenswrapper[4940]: I1126 07:17:18.592388 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:17:20 crc kubenswrapper[4940]: I1126 07:17:20.608226 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 07:17:20 crc kubenswrapper[4940]: I1126 07:17:20.608743 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 07:17:21 crc kubenswrapper[4940]: I1126 07:17:21.626378 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:17:21 crc kubenswrapper[4940]: I1126 07:17:21.626402 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:17:21 crc kubenswrapper[4940]: I1126 07:17:21.728308 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:17:21 crc kubenswrapper[4940]: I1126 07:17:21.728538 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:17:22 crc kubenswrapper[4940]: I1126 07:17:22.653315 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 07:17:22 crc kubenswrapper[4940]: I1126 07:17:22.679221 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 07:17:23 crc kubenswrapper[4940]: I1126 07:17:23.406913 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 07:17:27 crc kubenswrapper[4940]: I1126 07:17:27.589900 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 07:17:27 crc kubenswrapper[4940]: I1126 07:17:27.591383 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 07:17:27 crc kubenswrapper[4940]: I1126 07:17:27.595803 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 07:17:27 crc kubenswrapper[4940]: I1126 07:17:27.603023 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 07:17:28 crc kubenswrapper[4940]: I1126 07:17:28.422954 4940 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 07:17:28 crc kubenswrapper[4940]: I1126 07:17:28.431978 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 07:17:30 crc kubenswrapper[4940]: I1126 07:17:30.612103 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 07:17:30 crc kubenswrapper[4940]: I1126 07:17:30.613503 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 07:17:30 crc kubenswrapper[4940]: I1126 07:17:30.621602 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 07:17:31 crc kubenswrapper[4940]: I1126 07:17:31.460098 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 07:17:32 crc kubenswrapper[4940]: I1126 07:17:32.491907 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.532851 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.534679 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.537248 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.537852 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.542217 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.699935 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b3f0b781-b608-4968-a6cb-a48d78634c43-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b3f0b781-b608-4968-a6cb-a48d78634c43\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.700099 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b3f0b781-b608-4968-a6cb-a48d78634c43-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b3f0b781-b608-4968-a6cb-a48d78634c43\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.802181 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b3f0b781-b608-4968-a6cb-a48d78634c43-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b3f0b781-b608-4968-a6cb-a48d78634c43\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.802397 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b3f0b781-b608-4968-a6cb-a48d78634c43-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b3f0b781-b608-4968-a6cb-a48d78634c43\") " 
pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.802530 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b3f0b781-b608-4968-a6cb-a48d78634c43-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b3f0b781-b608-4968-a6cb-a48d78634c43\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.830538 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b3f0b781-b608-4968-a6cb-a48d78634c43-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b3f0b781-b608-4968-a6cb-a48d78634c43\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:37 crc kubenswrapper[4940]: I1126 07:17:37.854322 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:38 crc kubenswrapper[4940]: I1126 07:17:38.316459 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 07:17:38 crc kubenswrapper[4940]: I1126 07:17:38.514116 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"b3f0b781-b608-4968-a6cb-a48d78634c43","Type":"ContainerStarted","Data":"8c1fa5b91f5cb4672b620ac0e039ef4e7fa075949e7c056774d4f4e34ff251be"} Nov 26 07:17:39 crc kubenswrapper[4940]: I1126 07:17:39.526857 4940 generic.go:334] "Generic (PLEG): container finished" podID="b3f0b781-b608-4968-a6cb-a48d78634c43" containerID="a5201c359f4c2b24cd9ab7c88a90a47b7aa97d0439359daf100cdfb829280c64" exitCode=0 Nov 26 07:17:39 crc kubenswrapper[4940]: I1126 07:17:39.526940 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"b3f0b781-b608-4968-a6cb-a48d78634c43","Type":"ContainerDied","Data":"a5201c359f4c2b24cd9ab7c88a90a47b7aa97d0439359daf100cdfb829280c64"} Nov 26 07:17:40 crc kubenswrapper[4940]: I1126 07:17:40.930596 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.062110 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b3f0b781-b608-4968-a6cb-a48d78634c43-kube-api-access\") pod \"b3f0b781-b608-4968-a6cb-a48d78634c43\" (UID: \"b3f0b781-b608-4968-a6cb-a48d78634c43\") " Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.062344 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b3f0b781-b608-4968-a6cb-a48d78634c43-kubelet-dir\") pod \"b3f0b781-b608-4968-a6cb-a48d78634c43\" (UID: \"b3f0b781-b608-4968-a6cb-a48d78634c43\") " Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.062466 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b3f0b781-b608-4968-a6cb-a48d78634c43-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b3f0b781-b608-4968-a6cb-a48d78634c43" (UID: "b3f0b781-b608-4968-a6cb-a48d78634c43"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.062879 4940 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b3f0b781-b608-4968-a6cb-a48d78634c43-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.069678 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3f0b781-b608-4968-a6cb-a48d78634c43-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b3f0b781-b608-4968-a6cb-a48d78634c43" (UID: "b3f0b781-b608-4968-a6cb-a48d78634c43"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.164442 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b3f0b781-b608-4968-a6cb-a48d78634c43-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.544624 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"b3f0b781-b608-4968-a6cb-a48d78634c43","Type":"ContainerDied","Data":"8c1fa5b91f5cb4672b620ac0e039ef4e7fa075949e7c056774d4f4e34ff251be"} Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.544661 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c1fa5b91f5cb4672b620ac0e039ef4e7fa075949e7c056774d4f4e34ff251be" Nov 26 07:17:41 crc kubenswrapper[4940]: I1126 07:17:41.544721 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.527841 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 07:17:44 crc kubenswrapper[4940]: E1126 07:17:44.528796 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3f0b781-b608-4968-a6cb-a48d78634c43" containerName="pruner" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.528814 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3f0b781-b608-4968-a6cb-a48d78634c43" containerName="pruner" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.529084 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3f0b781-b608-4968-a6cb-a48d78634c43" containerName="pruner" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.529959 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.532673 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.532751 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.536658 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.731522 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66d37527-e535-45de-9f92-0f95d9f7a856-kube-api-access\") pod \"installer-9-crc\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.731606 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-kubelet-dir\") pod \"installer-9-crc\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.731883 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-var-lock\") pod \"installer-9-crc\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.833256 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-var-lock\") pod \"installer-9-crc\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.833353 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66d37527-e535-45de-9f92-0f95d9f7a856-kube-api-access\") pod \"installer-9-crc\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.833417 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-kubelet-dir\") pod \"installer-9-crc\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.833569 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-kubelet-dir\") pod \"installer-9-crc\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.833606 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-var-lock\") pod \"installer-9-crc\" (UID: 
\"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.851048 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66d37527-e535-45de-9f92-0f95d9f7a856-kube-api-access\") pod \"installer-9-crc\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:44 crc kubenswrapper[4940]: I1126 07:17:44.857908 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:17:45 crc kubenswrapper[4940]: I1126 07:17:45.337428 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 07:17:45 crc kubenswrapper[4940]: I1126 07:17:45.582154 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"66d37527-e535-45de-9f92-0f95d9f7a856","Type":"ContainerStarted","Data":"38da2448a8122076a841e0a9844d569c10cba14a4f39b69134394946a43fd1e1"} Nov 26 07:17:46 crc kubenswrapper[4940]: I1126 07:17:46.592664 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"66d37527-e535-45de-9f92-0f95d9f7a856","Type":"ContainerStarted","Data":"e925b4a56b72465b8a8a205a019f66039a756526e43166d81d1d6a0c679b4910"} Nov 26 07:17:46 crc kubenswrapper[4940]: I1126 07:17:46.614272 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.61424907 podStartE2EDuration="2.61424907s" podCreationTimestamp="2025-11-26 07:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:17:46.607883428 +0000 UTC m=+1368.128025047" watchObservedRunningTime="2025-11-26 07:17:46.61424907 +0000 UTC m=+1368.134390689" Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.582106 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.582554 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="6f4d1c84-5193-402a-9264-cd2bb000633c" containerName="openstackclient" containerID="cri-o://02981c04b83b2ef74e7cdd7032106bee3b1fd93d22f221e3394e2f19eaf07d07" gracePeriod=2 Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.609466 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.680411 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.680753 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="openstack-network-exporter" containerID="cri-o://fc517e7c3e97111fb38bbf8de532d3a15807567d75f6bc5c029091d6af85b9a5" gracePeriod=300 Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.709397 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.727745 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.727790 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.781485 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.781707 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="ovn-northd" containerID="cri-o://12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" gracePeriod=30 Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.782376 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="openstack-network-exporter" containerID="cri-o://b0bf7e3ce23d8aabef777f75a95394afce2f910c3c79d9220de648b9329b628f" gracePeriod=30 Nov 26 07:17:51 crc kubenswrapper[4940]: E1126 07:17:51.874377 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:51 crc kubenswrapper[4940]: E1126 07:17:51.874717 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data podName:69972749-03ff-48e9-b031-99c33ce86e96 nodeName:}" failed. No retries permitted until 2025-11-26 07:17:52.374694531 +0000 UTC m=+1373.894836240 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data") pod "rabbitmq-cell1-server-0" (UID: "69972749-03ff-48e9-b031-99c33ce86e96") : configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.950133 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.995668 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican2dca-account-delete-4kp2b"] Nov 26 07:17:51 crc kubenswrapper[4940]: E1126 07:17:51.996169 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f4d1c84-5193-402a-9264-cd2bb000633c" containerName="openstackclient" Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.996189 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f4d1c84-5193-402a-9264-cd2bb000633c" containerName="openstackclient" Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.996397 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f4d1c84-5193-402a-9264-cd2bb000633c" containerName="openstackclient" Nov 26 07:17:51 crc kubenswrapper[4940]: I1126 07:17:51.997159 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.019679 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican2dca-account-delete-4kp2b"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.084576 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59926125-c1e1-4ac6-aa0e-2c4256046612-operator-scripts\") pod \"barbican2dca-account-delete-4kp2b\" (UID: \"59926125-c1e1-4ac6-aa0e-2c4256046612\") " pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.084670 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzrr4\" (UniqueName: \"kubernetes.io/projected/59926125-c1e1-4ac6-aa0e-2c4256046612-kube-api-access-fzrr4\") pod \"barbican2dca-account-delete-4kp2b\" (UID: \"59926125-c1e1-4ac6-aa0e-2c4256046612\") " pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:52 crc kubenswrapper[4940]: E1126 07:17:52.087408 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 07:17:52 crc kubenswrapper[4940]: E1126 07:17:52.087466 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data podName:11a17576-9a94-4e2d-8915-9d838de09f0b nodeName:}" failed. No retries permitted until 2025-11-26 07:17:52.587448688 +0000 UTC m=+1374.107590307 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data") pod "rabbitmq-server-0" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b") : configmap "rabbitmq-config-data" not found Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.182239 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="ovsdbserver-sb" containerID="cri-o://94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f" gracePeriod=300 Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.187503 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59926125-c1e1-4ac6-aa0e-2c4256046612-operator-scripts\") pod \"barbican2dca-account-delete-4kp2b\" (UID: \"59926125-c1e1-4ac6-aa0e-2c4256046612\") " pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.187604 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzrr4\" (UniqueName: \"kubernetes.io/projected/59926125-c1e1-4ac6-aa0e-2c4256046612-kube-api-access-fzrr4\") pod \"barbican2dca-account-delete-4kp2b\" (UID: \"59926125-c1e1-4ac6-aa0e-2c4256046612\") " pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.189099 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59926125-c1e1-4ac6-aa0e-2c4256046612-operator-scripts\") pod \"barbican2dca-account-delete-4kp2b\" (UID: \"59926125-c1e1-4ac6-aa0e-2c4256046612\") " pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.357633 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzrr4\" (UniqueName: \"kubernetes.io/projected/59926125-c1e1-4ac6-aa0e-2c4256046612-kube-api-access-fzrr4\") pod \"barbican2dca-account-delete-4kp2b\" (UID: \"59926125-c1e1-4ac6-aa0e-2c4256046612\") " pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:52 crc kubenswrapper[4940]: E1126 07:17:52.475338 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:52 crc kubenswrapper[4940]: E1126 07:17:52.475417 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data podName:69972749-03ff-48e9-b031-99c33ce86e96 nodeName:}" failed. No retries permitted until 2025-11-26 07:17:53.475399552 +0000 UTC m=+1374.995541171 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data") pod "rabbitmq-cell1-server-0" (UID: "69972749-03ff-48e9-b031-99c33ce86e96") : configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.493643 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance6413-account-delete-nckhw"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.495231 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.496855 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.583115 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance6413-account-delete-nckhw"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.590479 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da80f9db-6be1-459c-9d61-ca1fc206d472-operator-scripts\") pod \"glance6413-account-delete-nckhw\" (UID: \"da80f9db-6be1-459c-9d61-ca1fc206d472\") " pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.590982 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x87dv\" (UniqueName: \"kubernetes.io/projected/da80f9db-6be1-459c-9d61-ca1fc206d472-kube-api-access-x87dv\") pod \"glance6413-account-delete-nckhw\" (UID: \"da80f9db-6be1-459c-9d61-ca1fc206d472\") " pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:52 crc kubenswrapper[4940]: E1126 07:17:52.590616 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 07:17:52 crc kubenswrapper[4940]: E1126 07:17:52.593666 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data podName:11a17576-9a94-4e2d-8915-9d838de09f0b nodeName:}" failed. No retries permitted until 2025-11-26 07:17:53.593634242 +0000 UTC m=+1375.113775871 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data") pod "rabbitmq-server-0" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b") : configmap "rabbitmq-config-data" not found Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.636577 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-nv2vz"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.667131 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-l5smw"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.698923 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da80f9db-6be1-459c-9d61-ca1fc206d472-operator-scripts\") pod \"glance6413-account-delete-nckhw\" (UID: \"da80f9db-6be1-459c-9d61-ca1fc206d472\") " pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.699810 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x87dv\" (UniqueName: \"kubernetes.io/projected/da80f9db-6be1-459c-9d61-ca1fc206d472-kube-api-access-x87dv\") pod \"glance6413-account-delete-nckhw\" (UID: \"da80f9db-6be1-459c-9d61-ca1fc206d472\") " pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.710365 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-nv2vz"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.715260 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da80f9db-6be1-459c-9d61-ca1fc206d472-operator-scripts\") pod \"glance6413-account-delete-nckhw\" (UID: \"da80f9db-6be1-459c-9d61-ca1fc206d472\") " pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.764363 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-l5smw"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.769092 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x87dv\" (UniqueName: \"kubernetes.io/projected/da80f9db-6be1-459c-9d61-ca1fc206d472-kube-api-access-x87dv\") pod \"glance6413-account-delete-nckhw\" (UID: \"da80f9db-6be1-459c-9d61-ca1fc206d472\") " pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.778521 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron2ea3-account-delete-7wj9l"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.779593 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_69f4262a-7eb3-4091-b103-393b9ab3a720/ovsdbserver-sb/0.log" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.779634 4940 generic.go:334] "Generic (PLEG): container finished" podID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerID="fc517e7c3e97111fb38bbf8de532d3a15807567d75f6bc5c029091d6af85b9a5" exitCode=2 Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.780442 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"69f4262a-7eb3-4091-b103-393b9ab3a720","Type":"ContainerDied","Data":"fc517e7c3e97111fb38bbf8de532d3a15807567d75f6bc5c029091d6af85b9a5"} Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.780633 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.806337 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron2ea3-account-delete-7wj9l"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.815455 4940 generic.go:334] "Generic (PLEG): container finished" podID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerID="b0bf7e3ce23d8aabef777f75a95394afce2f910c3c79d9220de648b9329b628f" exitCode=2 Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.815495 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cda3e3c5-7a68-4269-8c15-b463b9263805","Type":"ContainerDied","Data":"b0bf7e3ce23d8aabef777f75a95394afce2f910c3c79d9220de648b9329b628f"} Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.840236 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placementf807-account-delete-wgr4g"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.841435 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.901386 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.906113 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementf807-account-delete-wgr4g"] Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.920559 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a6c9a20-06bc-43f8-aad9-fb5d72231110-operator-scripts\") pod \"neutron2ea3-account-delete-7wj9l\" (UID: \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\") " pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:52 crc kubenswrapper[4940]: I1126 07:17:52.921093 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k22mj\" (UniqueName: \"kubernetes.io/projected/4a6c9a20-06bc-43f8-aad9-fb5d72231110-kube-api-access-k22mj\") pod \"neutron2ea3-account-delete-7wj9l\" (UID: \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\") " pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.002815 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder7f7c-account-delete-rkd24"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.004408 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.029197 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdjtw\" (UniqueName: \"kubernetes.io/projected/eb19b71d-413f-46df-a509-7dc7aff75598-kube-api-access-kdjtw\") pod \"placementf807-account-delete-wgr4g\" (UID: \"eb19b71d-413f-46df-a509-7dc7aff75598\") " pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.029257 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k22mj\" (UniqueName: \"kubernetes.io/projected/4a6c9a20-06bc-43f8-aad9-fb5d72231110-kube-api-access-k22mj\") pod \"neutron2ea3-account-delete-7wj9l\" (UID: \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\") " pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.029323 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb19b71d-413f-46df-a509-7dc7aff75598-operator-scripts\") pod \"placementf807-account-delete-wgr4g\" (UID: \"eb19b71d-413f-46df-a509-7dc7aff75598\") " pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.029343 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a6c9a20-06bc-43f8-aad9-fb5d72231110-operator-scripts\") pod \"neutron2ea3-account-delete-7wj9l\" (UID: \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\") " pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.035434 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a6c9a20-06bc-43f8-aad9-fb5d72231110-operator-scripts\") pod \"neutron2ea3-account-delete-7wj9l\" (UID: \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\") " pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.062242 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k22mj\" (UniqueName: \"kubernetes.io/projected/4a6c9a20-06bc-43f8-aad9-fb5d72231110-kube-api-access-k22mj\") pod \"neutron2ea3-account-delete-7wj9l\" (UID: \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\") " pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.083466 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder7f7c-account-delete-rkd24"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.130621 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdjtw\" (UniqueName: \"kubernetes.io/projected/eb19b71d-413f-46df-a509-7dc7aff75598-kube-api-access-kdjtw\") pod \"placementf807-account-delete-wgr4g\" (UID: \"eb19b71d-413f-46df-a509-7dc7aff75598\") " pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.130677 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb19b71d-413f-46df-a509-7dc7aff75598-operator-scripts\") pod \"placementf807-account-delete-wgr4g\" (UID: \"eb19b71d-413f-46df-a509-7dc7aff75598\") " pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:53 crc 
kubenswrapper[4940]: I1126 07:17:53.130727 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6rsd\" (UniqueName: \"kubernetes.io/projected/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-kube-api-access-c6rsd\") pod \"cinder7f7c-account-delete-rkd24\" (UID: \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\") " pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.130801 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-operator-scripts\") pod \"cinder7f7c-account-delete-rkd24\" (UID: \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\") " pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.131798 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb19b71d-413f-46df-a509-7dc7aff75598-operator-scripts\") pod \"placementf807-account-delete-wgr4g\" (UID: \"eb19b71d-413f-46df-a509-7dc7aff75598\") " pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.146653 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-m6x8r"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.185198 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdjtw\" (UniqueName: \"kubernetes.io/projected/eb19b71d-413f-46df-a509-7dc7aff75598-kube-api-access-kdjtw\") pod \"placementf807-account-delete-wgr4g\" (UID: \"eb19b71d-413f-46df-a509-7dc7aff75598\") " pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.193901 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c3c0935-a87e-4709-a468-f00d5e8b3ee1" path="/var/lib/kubelet/pods/0c3c0935-a87e-4709-a468-f00d5e8b3ee1/volumes" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.194796 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30b2a8d7-c2a4-462f-a93d-dc4b79f238bd" path="/var/lib/kubelet/pods/30b2a8d7-c2a4-462f-a93d-dc4b79f238bd/volumes" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.195433 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-g4vh8"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.197096 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-m6x8r"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.199341 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.216092 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-g4vh8"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.216664 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.233831 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-operator-scripts\") pod \"cinder7f7c-account-delete-rkd24\" (UID: \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\") " pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.234168 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6rsd\" (UniqueName: \"kubernetes.io/projected/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-kube-api-access-c6rsd\") pod \"cinder7f7c-account-delete-rkd24\" (UID: \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\") " pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.234510 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-operator-scripts\") pod \"cinder7f7c-account-delete-rkd24\" (UID: \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\") " pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.263400 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6rsd\" (UniqueName: \"kubernetes.io/projected/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-kube-api-access-c6rsd\") pod \"cinder7f7c-account-delete-rkd24\" (UID: \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\") " pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.284199 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-k9l7t"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.314841 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-5fj2b"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.315098 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-5fj2b" podUID="e3baa7ac-9221-47e0-afb0-25715f0e2491" containerName="openstack-network-exporter" containerID="cri-o://dd7562cc322c44e51d88e100358bb3ef6a12495c3a5c5238e4a92c094bcd8272" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.334375 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-78r7g"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.352494 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7f54fb65-6vndb"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.352712 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" podUID="0a428662-7264-4abe-837b-64739810c829" containerName="dnsmasq-dns" containerID="cri-o://05b9781e441ba29ec642551555aa92180638341b151ecdd169347a950574b1fc" gracePeriod=10 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.369165 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-f6s5k"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.381540 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapi31fa-account-delete-fjlwz"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.383087 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.415080 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.425378 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-f6s5k"] Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.432118 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.437519 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.437567 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="ovn-northd" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.469387 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-wkhpm"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.503610 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-wkhpm"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.521536 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.540219 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts\") pod \"novaapi31fa-account-delete-fjlwz\" (UID: \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\") " pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.540287 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4bff\" (UniqueName: \"kubernetes.io/projected/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-kube-api-access-z4bff\") pod \"novaapi31fa-account-delete-fjlwz\" (UID: \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\") " pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.540386 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.540432 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data podName:69972749-03ff-48e9-b031-99c33ce86e96 nodeName:}" failed. No retries permitted until 2025-11-26 07:17:55.540417681 +0000 UTC m=+1377.060559290 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data") pod "rabbitmq-cell1-server-0" (UID: "69972749-03ff-48e9-b031-99c33ce86e96") : configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.554725 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi31fa-account-delete-fjlwz"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.567159 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0ee23-account-delete-qkbnr"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.568670 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.583946 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0ee23-account-delete-qkbnr"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.600943 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.601335 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="99d95f08-663c-4443-9a16-459f02985879" containerName="openstack-network-exporter" containerID="cri-o://a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13" gracePeriod=300 Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.611759 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f is running failed: container process not found" containerID="94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.624558 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.624914 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerName="glance-log" containerID="cri-o://8233c7cbc4acce8bdbeebbef4a2bf8d2190310e50e6de48733f430fc0c6cf042" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.625232 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerName="glance-httpd" containerID="cri-o://0ffb3bf39d8a1f37181283238b716fcbcfefeaecec4ce53764db5c4edb369cee" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.633552 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f is running failed: container process not found" containerID="94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.640969 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f is running failed: container process not found" containerID="94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.641450 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="ovsdbserver-sb" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.643311 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts\") pod \"novaapi31fa-account-delete-fjlwz\" (UID: \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\") " pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.642336 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts\") pod \"novaapi31fa-account-delete-fjlwz\" (UID: \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\") " pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.646936 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4bff\" (UniqueName: \"kubernetes.io/projected/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-kube-api-access-z4bff\") pod \"novaapi31fa-account-delete-fjlwz\" (UID: \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\") " pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.647169 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-operator-scripts\") pod \"novacell0ee23-account-delete-qkbnr\" (UID: \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\") " pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.647333 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 07:17:53 crc kubenswrapper[4940]: E1126 07:17:53.647400 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data podName:11a17576-9a94-4e2d-8915-9d838de09f0b nodeName:}" failed. No retries permitted until 2025-11-26 07:17:55.647380143 +0000 UTC m=+1377.167521762 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data") pod "rabbitmq-server-0" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b") : configmap "rabbitmq-config-data" not found Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.650633 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9fsf\" (UniqueName: \"kubernetes.io/projected/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-kube-api-access-m9fsf\") pod \"novacell0ee23-account-delete-qkbnr\" (UID: \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\") " pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.703095 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4bff\" (UniqueName: \"kubernetes.io/projected/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-kube-api-access-z4bff\") pod \"novaapi31fa-account-delete-fjlwz\" (UID: \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\") " pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.739843 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-j86bw"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.776737 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="99d95f08-663c-4443-9a16-459f02985879" containerName="ovsdbserver-nb" containerID="cri-o://a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0" gracePeriod=300 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.779593 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9fsf\" (UniqueName: \"kubernetes.io/projected/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-kube-api-access-m9fsf\") pod \"novacell0ee23-account-delete-qkbnr\" (UID: \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\") " pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.779838 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-operator-scripts\") pod \"novacell0ee23-account-delete-qkbnr\" (UID: \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\") " pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.791739 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-operator-scripts\") pod \"novacell0ee23-account-delete-qkbnr\" (UID: \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\") " pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.793129 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-j86bw"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.805883 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-44sqk"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.811680 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9fsf\" (UniqueName: \"kubernetes.io/projected/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-kube-api-access-m9fsf\") pod \"novacell0ee23-account-delete-qkbnr\" (UID: \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\") " 
pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.819869 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-44sqk"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.828749 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.829426 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-server" containerID="cri-o://5832f00fdc7b07d3b583da1f514fceb0172f7918ac8ced3a03dda26a1c0934ea" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.829970 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="swift-recon-cron" containerID="cri-o://5968ea4146aabb9243cd7bc0fcedda38425122a8df83965feee0250ed0d15f33" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830028 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="rsync" containerID="cri-o://18fd11a465765ba259762706e393ce42274d6d5ab6b21c460bed17a0534150bb" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830094 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-expirer" containerID="cri-o://85ac1cbb7cd8cd7a99e39dcf3fc62fbf9041ad24323c288d9baf670d703ac447" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830128 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-updater" containerID="cri-o://e58705bee99fbf1a356b726ecc7d48c7a1d44cee6e432d30db17de1a4b1bed0c" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830165 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-auditor" containerID="cri-o://184283b7cf9ea6a22c37b80e59b65273e5c1e54072b94ba5e98ff402061ac3b7" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830205 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-replicator" containerID="cri-o://a370f34c9d0093cf91dd550d53235e78ccb9de14c218c3ae695b5536b1207fa8" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830244 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-server" containerID="cri-o://ddacc922294d2d9560d232e885d82b0359325dab5663024167d5a82671b91dfe" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830280 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-updater" containerID="cri-o://35af38ce835d55824412db931544d40f54c6a971946a5d4b50c5dfa394ce269c" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 
07:17:53.830316 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-auditor" containerID="cri-o://f7ed6711acf7fdec231f79586d9ef7609a087d63274a505334d0634157294d0a" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830352 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-replicator" containerID="cri-o://5c0f62d433b891f3245b23b16d6de813e3eab74a72c1c1978aa9aadf0b7c327d" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830388 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-server" containerID="cri-o://8d0b0bedcd7f34458be64dffa9614d1cddea6dd92857846272fc36941c4d41da" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830741 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-reaper" containerID="cri-o://7d1c1ce5f4f86fefb0522c2b4bb84960ef9691ba82d0c28a857ae9348d2ead68" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830816 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-auditor" containerID="cri-o://7a9b2378bf609eb570091f1edebe633dcf651482009fd78a7715a03ed6c3da04" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.830875 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-replicator" containerID="cri-o://1fb92f00872a9aa36d49326d1cf65db8a9032a280f9587ed0d9216aef9800d95" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.836636 4940 generic.go:334] "Generic (PLEG): container finished" podID="99d95f08-663c-4443-9a16-459f02985879" containerID="a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13" exitCode=2 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.836715 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"99d95f08-663c-4443-9a16-459f02985879","Type":"ContainerDied","Data":"a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13"} Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.839500 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7648b55b6f-h7txx"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.839951 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7648b55b6f-h7txx" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-api" containerID="cri-o://8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.840939 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7648b55b6f-h7txx" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-httpd" containerID="cri-o://5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.851025 
4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-5fj2b_e3baa7ac-9221-47e0-afb0-25715f0e2491/openstack-network-exporter/0.log" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.851079 4940 generic.go:334] "Generic (PLEG): container finished" podID="e3baa7ac-9221-47e0-afb0-25715f0e2491" containerID="dd7562cc322c44e51d88e100358bb3ef6a12495c3a5c5238e4a92c094bcd8272" exitCode=2 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.851159 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-5fj2b" event={"ID":"e3baa7ac-9221-47e0-afb0-25715f0e2491","Type":"ContainerDied","Data":"dd7562cc322c44e51d88e100358bb3ef6a12495c3a5c5238e4a92c094bcd8272"} Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.854191 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-65d858fd7b-dbln9"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.854399 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-65d858fd7b-dbln9" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" containerName="placement-log" containerID="cri-o://26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.854886 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-65d858fd7b-dbln9" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" containerName="placement-api" containerID="cri-o://473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.870449 4940 generic.go:334] "Generic (PLEG): container finished" podID="0a428662-7264-4abe-837b-64739810c829" containerID="05b9781e441ba29ec642551555aa92180638341b151ecdd169347a950574b1fc" exitCode=0 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.870561 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" event={"ID":"0a428662-7264-4abe-837b-64739810c829","Type":"ContainerDied","Data":"05b9781e441ba29ec642551555aa92180638341b151ecdd169347a950574b1fc"} Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.883379 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.883632 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerName="glance-log" containerID="cri-o://5d1a5be719119c7cfea6def8095a88a5e3c62d7b6d09bd5c94f113735004fb3f" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.884235 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerName="glance-httpd" containerID="cri-o://1b14a7d61d2e90b0760d7d58727eb7a9fea2999afab2a7de8b185771cfd9eea1" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.886931 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_69f4262a-7eb3-4091-b103-393b9ab3a720/ovsdbserver-sb/0.log" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.886979 4940 generic.go:334] "Generic (PLEG): container finished" podID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerID="94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f" exitCode=143 Nov 26 
07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.887067 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"69f4262a-7eb3-4091-b103-393b9ab3a720","Type":"ContainerDied","Data":"94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f"} Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.898161 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.908579 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.908867 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerName="cinder-scheduler" containerID="cri-o://240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.909008 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerName="probe" containerID="cri-o://f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.913678 4940 generic.go:334] "Generic (PLEG): container finished" podID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerID="8233c7cbc4acce8bdbeebbef4a2bf8d2190310e50e6de48733f430fc0c6cf042" exitCode=143 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.913763 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"79fc2ca6-8d13-44e6-83da-033c7f2d7df3","Type":"ContainerDied","Data":"8233c7cbc4acce8bdbeebbef4a2bf8d2190310e50e6de48733f430fc0c6cf042"} Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.918385 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.918634 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api-log" containerID="cri-o://f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.918728 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api" containerID="cri-o://cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.928146 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.931477 4940 generic.go:334] "Generic (PLEG): container finished" podID="6f4d1c84-5193-402a-9264-cd2bb000633c" containerID="02981c04b83b2ef74e7cdd7032106bee3b1fd93d22f221e3394e2f19eaf07d07" exitCode=137 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.949100 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.949542 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.949726 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-log" containerID="cri-o://74f237af1603b7679312e5f0ffe714ea2bf8e1fd4fcf073910b8ab40cc83aaff" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.950116 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-api" containerID="cri-o://62fc80217f3946465f0f5d0d83b59bf3c3a8c7cea1b4b8754c8843a8b67e8e47" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.958894 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6db949d4cf-kdv49"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.962530 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6db949d4cf-kdv49" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerName="barbican-worker-log" containerID="cri-o://68f96383a43cb6661fc9011d58327bf045a794abb89b6d697dff06c631c6488d" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.963075 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6db949d4cf-kdv49" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerName="barbican-worker" containerID="cri-o://b2273f1e55c5a69eb78cfb44375ac989afcca3b119c94705b8c9afb07c4525d3" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.981941 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7864499f6b-p77t2"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.983405 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerName="barbican-keystone-listener-log" containerID="cri-o://4bf744e80f9468639290e0122be55a99e227ce639f923bfca4bfba0a9c767cde" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.983931 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerName="barbican-keystone-listener" containerID="cri-o://6ec52072d9ea19666daec0c37b2fb57fbc8d500a0f552cf60f1fb0ea6a25ed06" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.995625 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6774864d76-mfv42"] Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.995997 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6774864d76-mfv42" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api-log" containerID="cri-o://af6d07c01a8f45e7b8f9c9d2cebf33c57841077f225e3827093818e8a7e717c3" gracePeriod=30 Nov 26 07:17:53 crc kubenswrapper[4940]: I1126 07:17:53.996277 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6774864d76-mfv42" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" 
containerName="barbican-api" containerID="cri-o://38ebe7a40f2302221b977bb730a4dafa3b829dbc717ce7001d421d16b83d3667" gracePeriod=30 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.004535 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerName="rabbitmq" containerID="cri-o://ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445" gracePeriod=604800 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.013432 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.013692 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-log" containerID="cri-o://31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076" gracePeriod=30 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.014013 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-metadata" containerID="cri-o://3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1" gracePeriod=30 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.024532 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.034999 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-bmbz8"] Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.052575 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-d6fe-account-create-update-v2428"] Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.064138 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-bmbz8"] Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.078159 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-d6fe-account-create-update-v2428"] Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.086572 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.087082 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="90360054-700b-4de8-9f51-f9b19cde50e0" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a5e3b511393841e8f4d387df3c57cc2eeb6640159bbbae1ce4b53392fe9b9546" gracePeriod=30 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.090692 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:54 crc kubenswrapper[4940]: E1126 07:17:54.102496 4940 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 26 07:17:54 crc kubenswrapper[4940]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 07:17:54 crc kubenswrapper[4940]: + source /usr/local/bin/container-scripts/functions Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNBridge=br-int Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNRemote=tcp:localhost:6642 Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNEncapType=geneve Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNAvailabilityZones= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ EnableChassisAsGateway=true Nov 26 07:17:54 crc kubenswrapper[4940]: ++ PhysicalNetworks= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNHostName= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 07:17:54 crc kubenswrapper[4940]: ++ ovs_dir=/var/lib/openvswitch Nov 26 07:17:54 crc kubenswrapper[4940]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 07:17:54 crc kubenswrapper[4940]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 07:17:54 crc kubenswrapper[4940]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 07:17:54 crc kubenswrapper[4940]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 07:17:54 crc kubenswrapper[4940]: + sleep 0.5 Nov 26 07:17:54 crc kubenswrapper[4940]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 07:17:54 crc kubenswrapper[4940]: + cleanup_ovsdb_server_semaphore Nov 26 07:17:54 crc kubenswrapper[4940]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 07:17:54 crc kubenswrapper[4940]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 07:17:54 crc kubenswrapper[4940]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-k9l7t" message=< Nov 26 07:17:54 crc kubenswrapper[4940]: Exiting ovsdb-server (5) [ OK ] Nov 26 07:17:54 crc kubenswrapper[4940]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 07:17:54 crc kubenswrapper[4940]: + source /usr/local/bin/container-scripts/functions Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNBridge=br-int Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNRemote=tcp:localhost:6642 Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNEncapType=geneve Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNAvailabilityZones= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ EnableChassisAsGateway=true Nov 26 07:17:54 crc kubenswrapper[4940]: ++ PhysicalNetworks= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNHostName= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 07:17:54 crc kubenswrapper[4940]: ++ ovs_dir=/var/lib/openvswitch Nov 26 07:17:54 crc kubenswrapper[4940]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 07:17:54 crc kubenswrapper[4940]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 07:17:54 crc kubenswrapper[4940]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 07:17:54 crc kubenswrapper[4940]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 07:17:54 crc kubenswrapper[4940]: + sleep 0.5 Nov 26 07:17:54 crc kubenswrapper[4940]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 07:17:54 crc kubenswrapper[4940]: + cleanup_ovsdb_server_semaphore Nov 26 07:17:54 crc kubenswrapper[4940]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 07:17:54 crc kubenswrapper[4940]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 07:17:54 crc kubenswrapper[4940]: > Nov 26 07:17:54 crc kubenswrapper[4940]: E1126 07:17:54.102550 4940 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 26 07:17:54 crc kubenswrapper[4940]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 07:17:54 crc kubenswrapper[4940]: + source /usr/local/bin/container-scripts/functions Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNBridge=br-int Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNRemote=tcp:localhost:6642 Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNEncapType=geneve Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNAvailabilityZones= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ EnableChassisAsGateway=true Nov 26 07:17:54 crc kubenswrapper[4940]: ++ PhysicalNetworks= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ OVNHostName= Nov 26 07:17:54 crc kubenswrapper[4940]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 07:17:54 crc kubenswrapper[4940]: ++ ovs_dir=/var/lib/openvswitch Nov 26 07:17:54 crc kubenswrapper[4940]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 07:17:54 crc kubenswrapper[4940]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 07:17:54 crc kubenswrapper[4940]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 07:17:54 crc kubenswrapper[4940]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 07:17:54 crc kubenswrapper[4940]: + sleep 0.5 Nov 26 07:17:54 crc kubenswrapper[4940]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 07:17:54 crc kubenswrapper[4940]: + cleanup_ovsdb_server_semaphore Nov 26 07:17:54 crc kubenswrapper[4940]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 07:17:54 crc kubenswrapper[4940]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 07:17:54 crc kubenswrapper[4940]: > pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" containerID="cri-o://3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.102593 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" containerID="cri-o://3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" gracePeriod=30 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.164665 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="69972749-03ff-48e9-b031-99c33ce86e96" containerName="rabbitmq" containerID="cri-o://ffc8d224e6ee06035af2a49a3dfbb96ff41fdb30dc4fc3b71983a00df2b005c0" gracePeriod=604800 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.270234 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" containerID="cri-o://b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" gracePeriod=30 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.308511 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" containerName="galera" containerID="cri-o://6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab" gracePeriod=30 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.595216 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_69f4262a-7eb3-4091-b103-393b9ab3a720/ovsdbserver-sb/0.log" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.595531 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.625890 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.644267 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.688892 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-5fj2b_e3baa7ac-9221-47e0-afb0-25715f0e2491/openstack-network-exporter/0.log" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.688958 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-5fj2b" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.720736 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-combined-ca-bundle\") pod \"69f4262a-7eb3-4091-b103-393b9ab3a720\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.720775 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-metrics-certs-tls-certs\") pod \"e3baa7ac-9221-47e0-afb0-25715f0e2491\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.720800 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-scripts\") pod \"69f4262a-7eb3-4091-b103-393b9ab3a720\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.720844 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"69f4262a-7eb3-4091-b103-393b9ab3a720\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.720911 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p2jf\" (UniqueName: \"kubernetes.io/projected/e3baa7ac-9221-47e0-afb0-25715f0e2491-kube-api-access-9p2jf\") pod \"e3baa7ac-9221-47e0-afb0-25715f0e2491\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.720952 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovs-rundir\") pod \"e3baa7ac-9221-47e0-afb0-25715f0e2491\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.720995 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3baa7ac-9221-47e0-afb0-25715f0e2491-config\") pod \"e3baa7ac-9221-47e0-afb0-25715f0e2491\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721015 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-sb\") pod \"0a428662-7264-4abe-837b-64739810c829\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721055 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdb-rundir\") pod \"69f4262a-7eb3-4091-b103-393b9ab3a720\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721091 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-config\") pod \"69f4262a-7eb3-4091-b103-393b9ab3a720\" (UID: 
\"69f4262a-7eb3-4091-b103-393b9ab3a720\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721135 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-nb\") pod \"0a428662-7264-4abe-837b-64739810c829\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721160 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7cdd\" (UniqueName: \"kubernetes.io/projected/69f4262a-7eb3-4091-b103-393b9ab3a720-kube-api-access-w7cdd\") pod \"69f4262a-7eb3-4091-b103-393b9ab3a720\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721182 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-combined-ca-bundle\") pod \"e3baa7ac-9221-47e0-afb0-25715f0e2491\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721200 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdbserver-sb-tls-certs\") pod \"69f4262a-7eb3-4091-b103-393b9ab3a720\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721229 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-combined-ca-bundle\") pod \"6f4d1c84-5193-402a-9264-cd2bb000633c\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721320 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbpb7\" (UniqueName: \"kubernetes.io/projected/0a428662-7264-4abe-837b-64739810c829-kube-api-access-xbpb7\") pod \"0a428662-7264-4abe-837b-64739810c829\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721364 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-metrics-certs-tls-certs\") pod \"69f4262a-7eb3-4091-b103-393b9ab3a720\" (UID: \"69f4262a-7eb3-4091-b103-393b9ab3a720\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721742 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkcmv\" (UniqueName: \"kubernetes.io/projected/6f4d1c84-5193-402a-9264-cd2bb000633c-kube-api-access-mkcmv\") pod \"6f4d1c84-5193-402a-9264-cd2bb000633c\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721766 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config\") pod \"6f4d1c84-5193-402a-9264-cd2bb000633c\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.721810 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-swift-storage-0\") pod \"0a428662-7264-4abe-837b-64739810c829\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.725209 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "69f4262a-7eb3-4091-b103-393b9ab3a720" (UID: "69f4262a-7eb3-4091-b103-393b9ab3a720"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.725288 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "e3baa7ac-9221-47e0-afb0-25715f0e2491" (UID: "e3baa7ac-9221-47e0-afb0-25715f0e2491"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.726711 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3baa7ac-9221-47e0-afb0-25715f0e2491-config" (OuterVolumeSpecName: "config") pod "e3baa7ac-9221-47e0-afb0-25715f0e2491" (UID: "e3baa7ac-9221-47e0-afb0-25715f0e2491"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.726937 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-config" (OuterVolumeSpecName: "config") pod "69f4262a-7eb3-4091-b103-393b9ab3a720" (UID: "69f4262a-7eb3-4091-b103-393b9ab3a720"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.731852 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-scripts" (OuterVolumeSpecName: "scripts") pod "69f4262a-7eb3-4091-b103-393b9ab3a720" (UID: "69f4262a-7eb3-4091-b103-393b9ab3a720"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.734643 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69f4262a-7eb3-4091-b103-393b9ab3a720-kube-api-access-w7cdd" (OuterVolumeSpecName: "kube-api-access-w7cdd") pod "69f4262a-7eb3-4091-b103-393b9ab3a720" (UID: "69f4262a-7eb3-4091-b103-393b9ab3a720"). InnerVolumeSpecName "kube-api-access-w7cdd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.734797 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3baa7ac-9221-47e0-afb0-25715f0e2491-kube-api-access-9p2jf" (OuterVolumeSpecName: "kube-api-access-9p2jf") pod "e3baa7ac-9221-47e0-afb0-25715f0e2491" (UID: "e3baa7ac-9221-47e0-afb0-25715f0e2491"). InnerVolumeSpecName "kube-api-access-9p2jf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.759311 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "69f4262a-7eb3-4091-b103-393b9ab3a720" (UID: "69f4262a-7eb3-4091-b103-393b9ab3a720"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.771182 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f4d1c84-5193-402a-9264-cd2bb000633c-kube-api-access-mkcmv" (OuterVolumeSpecName: "kube-api-access-mkcmv") pod "6f4d1c84-5193-402a-9264-cd2bb000633c" (UID: "6f4d1c84-5193-402a-9264-cd2bb000633c"). InnerVolumeSpecName "kube-api-access-mkcmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.780289 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a428662-7264-4abe-837b-64739810c829-kube-api-access-xbpb7" (OuterVolumeSpecName: "kube-api-access-xbpb7") pod "0a428662-7264-4abe-837b-64739810c829" (UID: "0a428662-7264-4abe-837b-64739810c829"). InnerVolumeSpecName "kube-api-access-xbpb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.823561 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovn-rundir\") pod \"e3baa7ac-9221-47e0-afb0-25715f0e2491\" (UID: \"e3baa7ac-9221-47e0-afb0-25715f0e2491\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.823621 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config-secret\") pod \"6f4d1c84-5193-402a-9264-cd2bb000633c\" (UID: \"6f4d1c84-5193-402a-9264-cd2bb000633c\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.823654 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-svc\") pod \"0a428662-7264-4abe-837b-64739810c829\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.823658 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "e3baa7ac-9221-47e0-afb0-25715f0e2491" (UID: "e3baa7ac-9221-47e0-afb0-25715f0e2491"). InnerVolumeSpecName "ovn-rundir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.823687 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-config\") pod \"0a428662-7264-4abe-837b-64739810c829\" (UID: \"0a428662-7264-4abe-837b-64739810c829\") " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824236 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbpb7\" (UniqueName: \"kubernetes.io/projected/0a428662-7264-4abe-837b-64739810c829-kube-api-access-xbpb7\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824255 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkcmv\" (UniqueName: \"kubernetes.io/projected/6f4d1c84-5193-402a-9264-cd2bb000633c-kube-api-access-mkcmv\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824267 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824278 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824319 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824334 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9p2jf\" (UniqueName: \"kubernetes.io/projected/e3baa7ac-9221-47e0-afb0-25715f0e2491-kube-api-access-9p2jf\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824345 4940 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/e3baa7ac-9221-47e0-afb0-25715f0e2491-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824355 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3baa7ac-9221-47e0-afb0-25715f0e2491-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824365 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824375 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69f4262a-7eb3-4091-b103-393b9ab3a720-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.824385 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7cdd\" (UniqueName: \"kubernetes.io/projected/69f4262a-7eb3-4091-b103-393b9ab3a720-kube-api-access-w7cdd\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.826246 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config" 
(OuterVolumeSpecName: "openstack-config") pod "6f4d1c84-5193-402a-9264-cd2bb000633c" (UID: "6f4d1c84-5193-402a-9264-cd2bb000633c"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.837098 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_99d95f08-663c-4443-9a16-459f02985879/ovsdbserver-nb/0.log" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.837408 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.895406 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.896164 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3baa7ac-9221-47e0-afb0-25715f0e2491" (UID: "e3baa7ac-9221-47e0-afb0-25715f0e2491"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.928364 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.928398 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.928411 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.952335 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f4d1c84-5193-402a-9264-cd2bb000633c" (UID: "6f4d1c84-5193-402a-9264-cd2bb000633c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.960161 4940 generic.go:334] "Generic (PLEG): container finished" podID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerID="31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076" exitCode=143 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.960301 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"63d173d6-a7cc-42f3-806d-50b9c8f8b189","Type":"ContainerDied","Data":"31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076"} Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.972276 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65d858fd7b-dbln9" event={"ID":"be43a059-c201-4bf3-92ac-304d58de4c02","Type":"ContainerDied","Data":"26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860"} Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.972296 4940 generic.go:334] "Generic (PLEG): container finished" podID="be43a059-c201-4bf3-92ac-304d58de4c02" containerID="26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860" exitCode=143 Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.998534 4940 scope.go:117] "RemoveContainer" containerID="02981c04b83b2ef74e7cdd7032106bee3b1fd93d22f221e3394e2f19eaf07d07" Nov 26 07:17:54 crc kubenswrapper[4940]: I1126 07:17:54.998705 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.000791 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0a428662-7264-4abe-837b-64739810c829" (UID: "0a428662-7264-4abe-837b-64739810c829"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.010058 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_69f4262a-7eb3-4091-b103-393b9ab3a720/ovsdbserver-sb/0.log" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.010377 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.010414 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"69f4262a-7eb3-4091-b103-393b9ab3a720","Type":"ContainerDied","Data":"7ef5a044fb9280b9e442183bbdee0ebab9d8ce9e6b15a5b6173481be74a116a8"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.012990 4940 generic.go:334] "Generic (PLEG): container finished" podID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerID="4bf744e80f9468639290e0122be55a99e227ce639f923bfca4bfba0a9c767cde" exitCode=143 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.013157 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" event={"ID":"2952bb7b-f134-4f55-969b-e30cd8fbe53c","Type":"ContainerDied","Data":"4bf744e80f9468639290e0122be55a99e227ce639f923bfca4bfba0a9c767cde"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.029525 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69f4262a-7eb3-4091-b103-393b9ab3a720" (UID: "69f4262a-7eb3-4091-b103-393b9ab3a720"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.030773 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-metrics-certs-tls-certs\") pod \"99d95f08-663c-4443-9a16-459f02985879\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.032049 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"99d95f08-663c-4443-9a16-459f02985879\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.032152 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-combined-ca-bundle\") pod \"99d95f08-663c-4443-9a16-459f02985879\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.032176 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-ovsdbserver-nb-tls-certs\") pod \"99d95f08-663c-4443-9a16-459f02985879\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.032286 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-scripts\") pod \"99d95f08-663c-4443-9a16-459f02985879\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.032309 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/99d95f08-663c-4443-9a16-459f02985879-ovsdb-rundir\") pod \"99d95f08-663c-4443-9a16-459f02985879\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " Nov 26 07:17:55 crc kubenswrapper[4940]: 
I1126 07:17:55.032326 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dzm4\" (UniqueName: \"kubernetes.io/projected/99d95f08-663c-4443-9a16-459f02985879-kube-api-access-9dzm4\") pod \"99d95f08-663c-4443-9a16-459f02985879\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.032389 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-config\") pod \"99d95f08-663c-4443-9a16-459f02985879\" (UID: \"99d95f08-663c-4443-9a16-459f02985879\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.033004 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.033025 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.033047 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.033183 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0a428662-7264-4abe-837b-64739810c829" (UID: "0a428662-7264-4abe-837b-64739810c829"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.033839 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-config" (OuterVolumeSpecName: "config") pod "99d95f08-663c-4443-9a16-459f02985879" (UID: "99d95f08-663c-4443-9a16-459f02985879"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.034016 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99d95f08-663c-4443-9a16-459f02985879-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "99d95f08-663c-4443-9a16-459f02985879" (UID: "99d95f08-663c-4443-9a16-459f02985879"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.034202 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-scripts" (OuterVolumeSpecName: "scripts") pod "99d95f08-663c-4443-9a16-459f02985879" (UID: "99d95f08-663c-4443-9a16-459f02985879"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.040260 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99d95f08-663c-4443-9a16-459f02985879-kube-api-access-9dzm4" (OuterVolumeSpecName: "kube-api-access-9dzm4") pod "99d95f08-663c-4443-9a16-459f02985879" (UID: "99d95f08-663c-4443-9a16-459f02985879"). InnerVolumeSpecName "kube-api-access-9dzm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.043095 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "99d95f08-663c-4443-9a16-459f02985879" (UID: "99d95f08-663c-4443-9a16-459f02985879"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.065824 4940 generic.go:334] "Generic (PLEG): container finished" podID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.065905 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k9l7t" event={"ID":"5f26eaaa-63b0-491d-b664-56edff3be80c","Type":"ContainerDied","Data":"3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.078347 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0a428662-7264-4abe-837b-64739810c829" (UID: "0a428662-7264-4abe-837b-64739810c829"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.081678 4940 generic.go:334] "Generic (PLEG): container finished" podID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerID="5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.081744 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7648b55b6f-h7txx" event={"ID":"e9325bed-7edc-41a3-a53c-fb5d147532f5","Type":"ContainerDied","Data":"5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.083565 4940 generic.go:334] "Generic (PLEG): container finished" podID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerID="5d1a5be719119c7cfea6def8095a88a5e3c62d7b6d09bd5c94f113735004fb3f" exitCode=143 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.083783 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf","Type":"ContainerDied","Data":"5d1a5be719119c7cfea6def8095a88a5e3c62d7b6d09bd5c94f113735004fb3f"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.087427 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0a428662-7264-4abe-837b-64739810c829" (UID: "0a428662-7264-4abe-837b-64739810c829"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.098659 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-bf77c95b9-864d6"] Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.098895 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-bf77c95b9-864d6" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerName="proxy-httpd" containerID="cri-o://9c7f2f809a503ac5e53851c2de3c5ae678c6834ac46092d7cc5557057bdccf99" gracePeriod=30 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.099396 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-bf77c95b9-864d6" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerName="proxy-server" containerID="cri-o://8c41b0e8cdcb6ee2abbf477b7026854cbe03716deb4b56286f7d6da58714b16d" gracePeriod=30 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.115586 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "69f4262a-7eb3-4091-b103-393b9ab3a720" (UID: "69f4262a-7eb3-4091-b103-393b9ab3a720"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127291 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="18fd11a465765ba259762706e393ce42274d6d5ab6b21c460bed17a0534150bb" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127458 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="85ac1cbb7cd8cd7a99e39dcf3fc62fbf9041ad24323c288d9baf670d703ac447" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127475 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="e58705bee99fbf1a356b726ecc7d48c7a1d44cee6e432d30db17de1a4b1bed0c" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127481 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="184283b7cf9ea6a22c37b80e59b65273e5c1e54072b94ba5e98ff402061ac3b7" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127488 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="a370f34c9d0093cf91dd550d53235e78ccb9de14c218c3ae695b5536b1207fa8" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127497 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="ddacc922294d2d9560d232e885d82b0359325dab5663024167d5a82671b91dfe" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127502 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="35af38ce835d55824412db931544d40f54c6a971946a5d4b50c5dfa394ce269c" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127508 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="f7ed6711acf7fdec231f79586d9ef7609a087d63274a505334d0634157294d0a" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127514 4940 generic.go:334] "Generic (PLEG): 
container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="5c0f62d433b891f3245b23b16d6de813e3eab74a72c1c1978aa9aadf0b7c327d" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127529 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="8d0b0bedcd7f34458be64dffa9614d1cddea6dd92857846272fc36941c4d41da" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127535 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="7d1c1ce5f4f86fefb0522c2b4bb84960ef9691ba82d0c28a857ae9348d2ead68" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127541 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="7a9b2378bf609eb570091f1edebe633dcf651482009fd78a7715a03ed6c3da04" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127547 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="1fb92f00872a9aa36d49326d1cf65db8a9032a280f9587ed0d9216aef9800d95" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127554 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="5832f00fdc7b07d3b583da1f514fceb0172f7918ac8ced3a03dda26a1c0934ea" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127597 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"18fd11a465765ba259762706e393ce42274d6d5ab6b21c460bed17a0534150bb"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127624 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"85ac1cbb7cd8cd7a99e39dcf3fc62fbf9041ad24323c288d9baf670d703ac447"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127635 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"e58705bee99fbf1a356b726ecc7d48c7a1d44cee6e432d30db17de1a4b1bed0c"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127644 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"184283b7cf9ea6a22c37b80e59b65273e5c1e54072b94ba5e98ff402061ac3b7"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127653 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"a370f34c9d0093cf91dd550d53235e78ccb9de14c218c3ae695b5536b1207fa8"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127664 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"ddacc922294d2d9560d232e885d82b0359325dab5663024167d5a82671b91dfe"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127674 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"35af38ce835d55824412db931544d40f54c6a971946a5d4b50c5dfa394ce269c"} Nov 26 07:17:55 crc 
kubenswrapper[4940]: I1126 07:17:55.127685 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"f7ed6711acf7fdec231f79586d9ef7609a087d63274a505334d0634157294d0a"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127693 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"5c0f62d433b891f3245b23b16d6de813e3eab74a72c1c1978aa9aadf0b7c327d"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127702 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"8d0b0bedcd7f34458be64dffa9614d1cddea6dd92857846272fc36941c4d41da"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127711 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"7d1c1ce5f4f86fefb0522c2b4bb84960ef9691ba82d0c28a857ae9348d2ead68"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127720 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"7a9b2378bf609eb570091f1edebe633dcf651482009fd78a7715a03ed6c3da04"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127730 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"1fb92f00872a9aa36d49326d1cf65db8a9032a280f9587ed0d9216aef9800d95"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.127740 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"5832f00fdc7b07d3b583da1f514fceb0172f7918ac8ced3a03dda26a1c0934ea"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.130634 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" event={"ID":"0a428662-7264-4abe-837b-64739810c829","Type":"ContainerDied","Data":"a472b77a91c351dc13cd9608ed15a6894a9a22674310fb4bfe746ed6d70c4082"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.130757 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.135271 4940 generic.go:334] "Generic (PLEG): container finished" podID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerID="74f237af1603b7679312e5f0ffe714ea2bf8e1fd4fcf073910b8ab40cc83aaff" exitCode=143 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.135343 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"310cee15-c1c5-4db3-8900-ca3107ba130d","Type":"ContainerDied","Data":"74f237af1603b7679312e5f0ffe714ea2bf8e1fd4fcf073910b8ab40cc83aaff"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.137523 4940 generic.go:334] "Generic (PLEG): container finished" podID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerID="f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594" exitCode=143 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.137574 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ff45362c-e19e-470b-9b67-a1c2d6385ba9","Type":"ContainerDied","Data":"f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139235 4940 generic.go:334] "Generic (PLEG): container finished" podID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerID="f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139277 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"71365c4a-71fa-4c50-9c71-b87510dcf548","Type":"ContainerDied","Data":"f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139350 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139379 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/99d95f08-663c-4443-9a16-459f02985879-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139391 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dzm4\" (UniqueName: \"kubernetes.io/projected/99d95f08-663c-4443-9a16-459f02985879-kube-api-access-9dzm4\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139401 4940 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139410 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99d95f08-663c-4443-9a16-459f02985879-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139417 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139443 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") 
on node \"crc\" " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139453 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.139462 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.144552 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "6f4d1c84-5193-402a-9264-cd2bb000633c" (UID: "6f4d1c84-5193-402a-9264-cd2bb000633c"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.146101 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6db949d4cf-kdv49" event={"ID":"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16","Type":"ContainerDied","Data":"68f96383a43cb6661fc9011d58327bf045a794abb89b6d697dff06c631c6488d"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.146032 4940 generic.go:334] "Generic (PLEG): container finished" podID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerID="68f96383a43cb6661fc9011d58327bf045a794abb89b6d697dff06c631c6488d" exitCode=143 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.148439 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_99d95f08-663c-4443-9a16-459f02985879/ovsdbserver-nb/0.log" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.148493 4940 generic.go:334] "Generic (PLEG): container finished" podID="99d95f08-663c-4443-9a16-459f02985879" containerID="a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0" exitCode=143 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.148551 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"99d95f08-663c-4443-9a16-459f02985879","Type":"ContainerDied","Data":"a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.148577 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"99d95f08-663c-4443-9a16-459f02985879","Type":"ContainerDied","Data":"2cfe4a05145d2eca576f4252b72dc78b5a8874e64f01e501f560c894ffc01ebb"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.148590 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.155500 4940 generic.go:334] "Generic (PLEG): container finished" podID="90360054-700b-4de8-9f51-f9b19cde50e0" containerID="a5e3b511393841e8f4d387df3c57cc2eeb6640159bbbae1ce4b53392fe9b9546" exitCode=0 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.156068 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"90360054-700b-4de8-9f51-f9b19cde50e0","Type":"ContainerDied","Data":"a5e3b511393841e8f4d387df3c57cc2eeb6640159bbbae1ce4b53392fe9b9546"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.158242 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-5fj2b_e3baa7ac-9221-47e0-afb0-25715f0e2491/openstack-network-exporter/0.log" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.158448 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-5fj2b" event={"ID":"e3baa7ac-9221-47e0-afb0-25715f0e2491","Type":"ContainerDied","Data":"6e422778bc134a6582edbf275bdb7d74591e12bd3473d45fd027375273493b1b"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.158675 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-5fj2b" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.160987 4940 generic.go:334] "Generic (PLEG): container finished" podID="35c6bf07-4770-450e-a19a-02323913fcd4" containerID="af6d07c01a8f45e7b8f9c9d2cebf33c57841077f225e3827093818e8a7e717c3" exitCode=143 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.161072 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6774864d76-mfv42" event={"ID":"35c6bf07-4770-450e-a19a-02323913fcd4","Type":"ContainerDied","Data":"af6d07c01a8f45e7b8f9c9d2cebf33c57841077f225e3827093818e8a7e717c3"} Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.176880 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-config" (OuterVolumeSpecName: "config") pod "0a428662-7264-4abe-837b-64739810c829" (UID: "0a428662-7264-4abe-837b-64739810c829"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.200585 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec" path="/var/lib/kubelet/pods/0f14cc1f-7e2f-441a-9e2b-b8aeedbb26ec/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.201487 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37492950-3851-47fd-ae42-e9009bf4a7bf" path="/var/lib/kubelet/pods/37492950-3851-47fd-ae42-e9009bf4a7bf/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.202174 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40a16b36-c3e9-4537-bf10-89b685489f39" path="/var/lib/kubelet/pods/40a16b36-c3e9-4537-bf10-89b685489f39/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.202966 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "e3baa7ac-9221-47e0-afb0-25715f0e2491" (UID: "e3baa7ac-9221-47e0-afb0-25715f0e2491"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.203053 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4950f291-1a37-4725-8321-fa2e0c39155e" path="/var/lib/kubelet/pods/4950f291-1a37-4725-8321-fa2e0c39155e/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.207495 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f4d1c84-5193-402a-9264-cd2bb000633c" path="/var/lib/kubelet/pods/6f4d1c84-5193-402a-9264-cd2bb000633c/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.210306 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="742ee5ec-ca7e-47f2-becd-5352810d275f" path="/var/lib/kubelet/pods/742ee5ec-ca7e-47f2-becd-5352810d275f/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.210367 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.211663 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b6691fc-2e13-47a2-86ff-cb5350301696" path="/var/lib/kubelet/pods/9b6691fc-2e13-47a2-86ff-cb5350301696/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.212386 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99d95f08-663c-4443-9a16-459f02985879" (UID: "99d95f08-663c-4443-9a16-459f02985879"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.212947 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a29f65e2-c5bd-444a-84d1-7532996c10aa" path="/var/lib/kubelet/pods/a29f65e2-c5bd-444a-84d1-7532996c10aa/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.213994 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8" path="/var/lib/kubelet/pods/f3d6fb0b-a20a-41c3-ba0b-e3cdcc8bebc8/volumes" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.219550 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "69f4262a-7eb3-4091-b103-393b9ab3a720" (UID: "69f4262a-7eb3-4091-b103-393b9ab3a720"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.241127 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3baa7ac-9221-47e0-afb0-25715f0e2491-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.241203 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.241215 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.241226 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f4262a-7eb3-4091-b103-393b9ab3a720-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.241238 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6f4d1c84-5193-402a-9264-cd2bb000633c-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.241249 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a428662-7264-4abe-837b-64739810c829-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.243358 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "99d95f08-663c-4443-9a16-459f02985879" (UID: "99d95f08-663c-4443-9a16-459f02985879"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.246756 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "99d95f08-663c-4443-9a16-459f02985879" (UID: "99d95f08-663c-4443-9a16-459f02985879"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.334285 4940 scope.go:117] "RemoveContainer" containerID="fc517e7c3e97111fb38bbf8de532d3a15807567d75f6bc5c029091d6af85b9a5" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.344210 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.344243 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/99d95f08-663c-4443-9a16-459f02985879-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.367405 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.375026 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.378113 4940 scope.go:117] "RemoveContainer" containerID="94f44e1fde1447ce4f721db395d6b465b51ba7a5c4b84906dede36568bbcb83f" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.408813 4940 scope.go:117] "RemoveContainer" containerID="05b9781e441ba29ec642551555aa92180638341b151ecdd169347a950574b1fc" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.430429 4940 scope.go:117] "RemoveContainer" containerID="8bb22126f7e8fb3758222b160d460d1e7ad358b951ff13819a57073af26e80b8" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.467118 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7f54fb65-6vndb"] Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.470668 4940 scope.go:117] "RemoveContainer" containerID="a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.484151 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d7f54fb65-6vndb"] Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.506913 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.518069 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.530146 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.552487 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.552548 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data podName:69972749-03ff-48e9-b031-99c33ce86e96 nodeName:}" failed. No retries permitted until 2025-11-26 07:17:59.552530386 +0000 UTC m=+1381.072672005 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data") pod "rabbitmq-cell1-server-0" (UID: "69972749-03ff-48e9-b031-99c33ce86e96") : configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.560211 4940 scope.go:117] "RemoveContainer" containerID="a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.560505 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-5fj2b"] Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.595993 4940 scope.go:117] "RemoveContainer" containerID="a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.596105 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-5fj2b"] Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.600977 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13\": container with ID starting with a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13 not found: ID does not exist" containerID="a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.601017 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13"} err="failed to get container status \"a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13\": rpc error: code = NotFound desc = could not find container \"a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13\": container with ID starting with a9e3d1f84e8a2b5eca0962223194cc4731077c56b8fbff6659379dc794339a13 not found: ID does not exist" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.601054 4940 scope.go:117] "RemoveContainer" containerID="a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0" Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.602599 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0\": container with ID starting with a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0 not found: ID does not exist" containerID="a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.602637 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0"} err="failed to get container status \"a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0\": rpc error: code = NotFound desc = could not find container \"a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0\": container with ID starting with a04a9f4b25f1621c56c767dbe2191cb0aec159b3430f3a5e5b98fc4ffc2b22a0 not found: ID does not exist" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.602656 4940 scope.go:117] "RemoveContainer" containerID="dd7562cc322c44e51d88e100358bb3ef6a12495c3a5c5238e4a92c094bcd8272" Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.618662 4940 log.go:32] "ExecSync cmd from runtime service failed" 
err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.619199 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.619668 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.619701 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.622639 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:17:55 crc kubenswrapper[4940]: W1126 07:17:55.625929 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb19b71d_413f_46df_a509_7dc7aff75598.slice/crio-4d5b8ae7cec5c94611367b827bd086868f59549c03572a3e35c8f269b50a6d8c WatchSource:0}: Error finding container 4d5b8ae7cec5c94611367b827bd086868f59549c03572a3e35c8f269b50a6d8c: Status 404 returned error can't find the container with id 4d5b8ae7cec5c94611367b827bd086868f59549c03572a3e35c8f269b50a6d8c Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.629256 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.630122 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.630647 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.630935 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.637261 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron2ea3-account-delete-7wj9l"] Nov 26 07:17:55 crc kubenswrapper[4940]: W1126 07:17:55.640487 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a6c9a20_06bc_43f8_aad9_fb5d72231110.slice/crio-2601733878ca9b8e42d76c0a11edd9715f18a3944030e68357da86382a6bf2a3 WatchSource:0}: Error finding container 2601733878ca9b8e42d76c0a11edd9715f18a3944030e68357da86382a6bf2a3: Status 404 returned error can't find the container with id 2601733878ca9b8e42d76c0a11edd9715f18a3944030e68357da86382a6bf2a3 Nov 26 07:17:55 crc kubenswrapper[4940]: W1126 07:17:55.642312 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode67a9769_a54d_4b7a_ac04_dcbe4bc5662c.slice/crio-1dff2bb7fa3b12cd5b4e6d018c62e67ce2a2a4cecdd2961b0f1c3f7bea262fe5 WatchSource:0}: Error finding container 1dff2bb7fa3b12cd5b4e6d018c62e67ce2a2a4cecdd2961b0f1c3f7bea262fe5: Status 404 returned error can't find the container with id 1dff2bb7fa3b12cd5b4e6d018c62e67ce2a2a4cecdd2961b0f1c3f7bea262fe5 Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.654589 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-vencrypt-tls-certs\") pod \"90360054-700b-4de8-9f51-f9b19cde50e0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.654664 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-config-data\") pod \"90360054-700b-4de8-9f51-f9b19cde50e0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.654718 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rztjt\" (UniqueName: \"kubernetes.io/projected/90360054-700b-4de8-9f51-f9b19cde50e0-kube-api-access-rztjt\") pod \"90360054-700b-4de8-9f51-f9b19cde50e0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.658299 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-nova-novncproxy-tls-certs\") pod \"90360054-700b-4de8-9f51-f9b19cde50e0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") " Nov 26 
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.658467 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-combined-ca-bundle\") pod \"90360054-700b-4de8-9f51-f9b19cde50e0\" (UID: \"90360054-700b-4de8-9f51-f9b19cde50e0\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.659272 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Nov 26 07:17:55 crc kubenswrapper[4940]: E1126 07:17:55.659321 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data podName:11a17576-9a94-4e2d-8915-9d838de09f0b nodeName:}" failed. No retries permitted until 2025-11-26 07:17:59.659305983 +0000 UTC m=+1381.179447602 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data") pod "rabbitmq-server-0" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b") : configmap "rabbitmq-config-data" not found
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.667732 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementf807-account-delete-wgr4g"]
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.674359 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90360054-700b-4de8-9f51-f9b19cde50e0-kube-api-access-rztjt" (OuterVolumeSpecName: "kube-api-access-rztjt") pod "90360054-700b-4de8-9f51-f9b19cde50e0" (UID: "90360054-700b-4de8-9f51-f9b19cde50e0"). InnerVolumeSpecName "kube-api-access-rztjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.674478 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance6413-account-delete-nckhw"]
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.682556 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder7f7c-account-delete-rkd24"]
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.697916 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-config-data" (OuterVolumeSpecName: "config-data") pod "90360054-700b-4de8-9f51-f9b19cde50e0" (UID: "90360054-700b-4de8-9f51-f9b19cde50e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.706371 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "90360054-700b-4de8-9f51-f9b19cde50e0" (UID: "90360054-700b-4de8-9f51-f9b19cde50e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.733283 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "90360054-700b-4de8-9f51-f9b19cde50e0" (UID: "90360054-700b-4de8-9f51-f9b19cde50e0"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.736401 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "90360054-700b-4de8-9f51-f9b19cde50e0" (UID: "90360054-700b-4de8-9f51-f9b19cde50e0"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.754812 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="69972749-03ff-48e9-b031-99c33ce86e96" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused"
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.761833 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.761930 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-galera-tls-certs\") pod \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.761978 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-combined-ca-bundle\") pod \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762054 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-operator-scripts\") pod \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762126 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kolla-config\") pod \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762212 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-generated\") pod \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762324 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-default\") pod \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762356 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdrdt\" (UniqueName: \"kubernetes.io/projected/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kube-api-access-tdrdt\") pod \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\" (UID: \"3f53eb12-2c7d-4107-9d63-f0db8e983d90\") "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762852 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762871 4940 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762883 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762895 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rztjt\" (UniqueName: \"kubernetes.io/projected/90360054-700b-4de8-9f51-f9b19cde50e0-kube-api-access-rztjt\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.762908 4940 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/90360054-700b-4de8-9f51-f9b19cde50e0-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.763479 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "3f53eb12-2c7d-4107-9d63-f0db8e983d90" (UID: "3f53eb12-2c7d-4107-9d63-f0db8e983d90"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.764207 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "3f53eb12-2c7d-4107-9d63-f0db8e983d90" (UID: "3f53eb12-2c7d-4107-9d63-f0db8e983d90"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.764267 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "3f53eb12-2c7d-4107-9d63-f0db8e983d90" (UID: "3f53eb12-2c7d-4107-9d63-f0db8e983d90"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.764455 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3f53eb12-2c7d-4107-9d63-f0db8e983d90" (UID: "3f53eb12-2c7d-4107-9d63-f0db8e983d90"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.784064 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kube-api-access-tdrdt" (OuterVolumeSpecName: "kube-api-access-tdrdt") pod "3f53eb12-2c7d-4107-9d63-f0db8e983d90" (UID: "3f53eb12-2c7d-4107-9d63-f0db8e983d90"). InnerVolumeSpecName "kube-api-access-tdrdt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.792622 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican2dca-account-delete-4kp2b"]
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.793774 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "mysql-db") pod "3f53eb12-2c7d-4107-9d63-f0db8e983d90" (UID: "3f53eb12-2c7d-4107-9d63-f0db8e983d90"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.814486 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f53eb12-2c7d-4107-9d63-f0db8e983d90" (UID: "3f53eb12-2c7d-4107-9d63-f0db8e983d90"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.856361 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "3f53eb12-2c7d-4107-9d63-f0db8e983d90" (UID: "3f53eb12-2c7d-4107-9d63-f0db8e983d90"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.884718 4940 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kolla-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.884751 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-generated\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.884763 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-config-data-default\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.884774 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdrdt\" (UniqueName: \"kubernetes.io/projected/3f53eb12-2c7d-4107-9d63-f0db8e983d90-kube-api-access-tdrdt\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.884800 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.884811 4940 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-galera-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.884822 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f53eb12-2c7d-4107-9d63-f0db8e983d90-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.884833 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3f53eb12-2c7d-4107-9d63-f0db8e983d90-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.954419 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc"
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.973935 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi31fa-account-delete-fjlwz"]
Nov 26 07:17:55 crc kubenswrapper[4940]: I1126 07:17:55.995523 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.039936 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.040270 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="160032d4-a9c0-4b2c-be8b-f4a5c188c451" containerName="nova-cell1-conductor-conductor" containerID="cri-o://af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731" gracePeriod=30
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.092973 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z542t"]
Nov 26 07:17:56 crc kubenswrapper[4940]: W1126 07:17:56.096289 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96cf8cc9_e0ee_4aa2_9842_83d45e24c46a.slice/crio-3a35fcf450a2778c7357bf8d33f90b0f5e57f498b80f86c9155aafd80eb0ecff WatchSource:0}: Error finding container 3a35fcf450a2778c7357bf8d33f90b0f5e57f498b80f86c9155aafd80eb0ecff: Status 404 returned error can't find the container with id 3a35fcf450a2778c7357bf8d33f90b0f5e57f498b80f86c9155aafd80eb0ecff
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.106478 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.110798 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z542t"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.135152 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bf4g9"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.154340 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.154575 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="f93321ef-2519-4bc0-b3d1-a45194267ca6" containerName="nova-cell0-conductor-conductor" containerID="cri-o://e146e6ba7d8a0d8f7e78795f75427c31b1e5c239738564750f702ee38a6b5f61" gracePeriod=30
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.171649 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0ee23-account-delete-qkbnr"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.188331 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-bf4g9"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.203696 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.203926 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" containerName="nova-scheduler-scheduler" containerID="cri-o://be9a79f6e9b952d84bd13f052112cb7a980f7370fa0b6edda5ce2ca0596c774c" gracePeriod=30
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.212965 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0ee23-account-delete-qkbnr" event={"ID":"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a","Type":"ContainerStarted","Data":"3a35fcf450a2778c7357bf8d33f90b0f5e57f498b80f86c9155aafd80eb0ecff"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.215030 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder7f7c-account-delete-rkd24" event={"ID":"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c","Type":"ContainerStarted","Data":"53d8c6b6abfceb5f620ed0f9437eb514b359bef4a1edb590086b6b445c8b72bf"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.215091 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder7f7c-account-delete-rkd24" event={"ID":"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c","Type":"ContainerStarted","Data":"1dff2bb7fa3b12cd5b4e6d018c62e67ce2a2a4cecdd2961b0f1c3f7bea262fe5"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.216230 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"90360054-700b-4de8-9f51-f9b19cde50e0","Type":"ContainerDied","Data":"169a79d6843b929d05adb9ff8717c20874f5ae70bc665e62df1f65007ff60e50"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.216265 4940 scope.go:117] "RemoveContainer" containerID="a5e3b511393841e8f4d387df3c57cc2eeb6640159bbbae1ce4b53392fe9b9546"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.216388 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.246143 4940 generic.go:334] "Generic (PLEG): container finished" podID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" containerID="6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab" exitCode=0
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.246203 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3f53eb12-2c7d-4107-9d63-f0db8e983d90","Type":"ContainerDied","Data":"6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.246225 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3f53eb12-2c7d-4107-9d63-f0db8e983d90","Type":"ContainerDied","Data":"035f9273b7b7ca13ed24a3f44a51aea64f6a3e8a5083b9795e0cb4bd1cc6766c"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.246286 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.251821 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementf807-account-delete-wgr4g" event={"ID":"eb19b71d-413f-46df-a509-7dc7aff75598","Type":"ContainerStarted","Data":"4d5b8ae7cec5c94611367b827bd086868f59549c03572a3e35c8f269b50a6d8c"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.254275 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance6413-account-delete-nckhw" event={"ID":"da80f9db-6be1-459c-9d61-ca1fc206d472","Type":"ContainerStarted","Data":"679a6b9824ac88ddfe86a5b89034695430ae6a0d125833692c2d6d5edec98f14"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.258639 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron2ea3-account-delete-7wj9l" event={"ID":"4a6c9a20-06bc-43f8-aad9-fb5d72231110","Type":"ContainerStarted","Data":"0e5f1295bc9bd05f924d7ad125718c5325b29da946b426629ce0de7b927b2d74"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.258690 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron2ea3-account-delete-7wj9l" event={"ID":"4a6c9a20-06bc-43f8-aad9-fb5d72231110","Type":"ContainerStarted","Data":"2601733878ca9b8e42d76c0a11edd9715f18a3944030e68357da86382a6bf2a3"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.259987 4940 scope.go:117] "RemoveContainer" containerID="6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.271455 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican2dca-account-delete-4kp2b" event={"ID":"59926125-c1e1-4ac6-aa0e-2c4256046612","Type":"ContainerStarted","Data":"62bcb5e1a9b9b67e4686267c3741b8d746921c8ea6d5b584a6ece17ed7a7472b"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.277292 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron2ea3-account-delete-7wj9l" podStartSLOduration=4.277275432 podStartE2EDuration="4.277275432s" podCreationTimestamp="2025-11-26 07:17:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:17:56.274530505 +0000 UTC m=+1377.794672124" watchObservedRunningTime="2025-11-26 07:17:56.277275432 +0000 UTC m=+1377.797417051"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.308790 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.313142 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi31fa-account-delete-fjlwz" event={"ID":"9abf09e3-dd14-42f4-8b1d-de23d9f0f218","Type":"ContainerStarted","Data":"de9f45f3fb0f077cbe530ea44f2493e6f12c4d95e52c74bb0598fd4b309aa534"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.320115 4940 scope.go:117] "RemoveContainer" containerID="4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.328645 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.330954 4940 generic.go:334] "Generic (PLEG): container finished" podID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerID="8c41b0e8cdcb6ee2abbf477b7026854cbe03716deb4b56286f7d6da58714b16d" exitCode=0
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.330983 4940 generic.go:334] "Generic (PLEG): container finished" podID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerID="9c7f2f809a503ac5e53851c2de3c5ae678c6834ac46092d7cc5557057bdccf99" exitCode=0
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.331029 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-bf77c95b9-864d6" event={"ID":"01a8836d-ba47-44ef-995e-f5bf2227dcd4","Type":"ContainerDied","Data":"8c41b0e8cdcb6ee2abbf477b7026854cbe03716deb4b56286f7d6da58714b16d"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.331091 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-bf77c95b9-864d6" event={"ID":"01a8836d-ba47-44ef-995e-f5bf2227dcd4","Type":"ContainerDied","Data":"9c7f2f809a503ac5e53851c2de3c5ae678c6834ac46092d7cc5557057bdccf99"}
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.341573 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.356740 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.525546 4940 scope.go:117] "RemoveContainer" containerID="6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab"
Nov 26 07:17:56 crc kubenswrapper[4940]: E1126 07:17:56.527450 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab\": container with ID starting with 6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab not found: ID does not exist" containerID="6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.527479 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab"} err="failed to get container status \"6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab\": rpc error: code = NotFound desc = could not find container \"6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab\": container with ID starting with 6b5781886a9bf794d9d7e7d622e1cc8a0ee0adf6832b41d048b74308c5b158ab not found: ID does not exist"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.527499 4940 scope.go:117] "RemoveContainer" containerID="4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b"
Nov 26 07:17:56 crc kubenswrapper[4940]: E1126 07:17:56.527853 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b\": container with ID starting with 4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b not found: ID does not exist" containerID="4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.527891 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b"} err="failed to get container status \"4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b\": rpc error: code = NotFound desc = could not find container \"4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b\": container with ID starting with 4bb798499804bbef11185e3101b912b0ee0ebeef9283bcc97d172258cc33f18b not found: ID does not exist"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.636563 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-bf77c95b9-864d6"
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.797475 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.797749 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="ceilometer-central-agent" containerID="cri-o://aa12214a40e17b396195f7c3910988635f60c08b39041e13aa87c7d6cc3c84be" gracePeriod=30
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.798156 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="proxy-httpd" containerID="cri-o://9a54934efee8d36f8fe7bb895361ca1c8eea974267d635a96b512ed26e1494b7" gracePeriod=30
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.798208 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="sg-core" containerID="cri-o://54bb0d6665161c60a8df29f40f934d0511950d32bca2d3f2401e1f8db929d7db" gracePeriod=30
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.799156 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="ceilometer-notification-agent" containerID="cri-o://5fcae63004fc694c125bd0895a86667cfe1b52745e785eba586d24f5dff5b67d" gracePeriod=30
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.818627 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-combined-ca-bundle\") pod \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") "
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.818684 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-internal-tls-certs\") pod \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") "
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.818726 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-log-httpd\") pod \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") "
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.818808 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-config-data\") pod \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") "
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.818907 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-etc-swift\") pod \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") "
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.818931 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpbs2\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-kube-api-access-vpbs2\") pod \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") "
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.819501 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "01a8836d-ba47-44ef-995e-f5bf2227dcd4" (UID: "01a8836d-ba47-44ef-995e-f5bf2227dcd4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.819815 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-public-tls-certs\") pod \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") "
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.819865 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-run-httpd\") pod \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\" (UID: \"01a8836d-ba47-44ef-995e-f5bf2227dcd4\") "
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.820310 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.820582 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "01a8836d-ba47-44ef-995e-f5bf2227dcd4" (UID: "01a8836d-ba47-44ef-995e-f5bf2227dcd4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.842738 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-kube-api-access-vpbs2" (OuterVolumeSpecName: "kube-api-access-vpbs2") pod "01a8836d-ba47-44ef-995e-f5bf2227dcd4" (UID: "01a8836d-ba47-44ef-995e-f5bf2227dcd4"). InnerVolumeSpecName "kube-api-access-vpbs2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.844723 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "01a8836d-ba47-44ef-995e-f5bf2227dcd4" (UID: "01a8836d-ba47-44ef-995e-f5bf2227dcd4"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.858817 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.859112 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="3031ed6c-4ad3-4d47-a902-4a52bb40be6d" containerName="kube-state-metrics" containerID="cri-o://dc48ce55d4720539d72187d51174a10a64823a9c28973e4e5b3bfa0a2a2a6c23" gracePeriod=30
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.925482 4940 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.925527 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpbs2\" (UniqueName: \"kubernetes.io/projected/01a8836d-ba47-44ef-995e-f5bf2227dcd4-kube-api-access-vpbs2\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.925541 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/01a8836d-ba47-44ef-995e-f5bf2227dcd4-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.962390 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"]
Nov 26 07:17:56 crc kubenswrapper[4940]: I1126 07:17:56.962593 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="69f67fa7-ea74-4966-b69c-ab547896057e" containerName="memcached" containerID="cri-o://f62286900f59e04e8b5935f72d22351db72e1e90c13dfd3c197b2183c1ddd5c0" gracePeriod=30
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.074070 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-config-data" (OuterVolumeSpecName: "config-data") pod "01a8836d-ba47-44ef-995e-f5bf2227dcd4" (UID: "01a8836d-ba47-44ef-995e-f5bf2227dcd4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.087934 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-mkt8h"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.103562 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-mkt8h"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.110858 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-xpsrr"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.120118 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-xpsrr"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.129107 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6f4459f4df-b92xj"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.129359 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-6f4459f4df-b92xj" podUID="77513168-a1ea-4794-a859-b942b0e9c262" containerName="keystone-api" containerID="cri-o://91bd7e89a1d7d0eb89a61cc1bbe9824e672e433b9e8a1ae66449fbaea8335372" gracePeriod=30
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.129813 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "01a8836d-ba47-44ef-995e-f5bf2227dcd4" (UID: "01a8836d-ba47-44ef-995e-f5bf2227dcd4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.141531 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.149469 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6b40-account-create-update-bch2t"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.159255 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-hnrhd"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.164064 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.164089 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.173195 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "01a8836d-ba47-44ef-995e-f5bf2227dcd4" (UID: "01a8836d-ba47-44ef-995e-f5bf2227dcd4"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.203413 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a428662-7264-4abe-837b-64739810c829" path="/var/lib/kubelet/pods/0a428662-7264-4abe-837b-64739810c829/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.204374 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "01a8836d-ba47-44ef-995e-f5bf2227dcd4" (UID: "01a8836d-ba47-44ef-995e-f5bf2227dcd4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.205690 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d22c5e6-6595-4926-9525-cc5e90134b3c" path="/var/lib/kubelet/pods/0d22c5e6-6595-4926-9525-cc5e90134b3c/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.206330 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ffadde9-94cd-4f89-b270-e4a533f5399c" path="/var/lib/kubelet/pods/1ffadde9-94cd-4f89-b270-e4a533f5399c/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.206919 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" path="/var/lib/kubelet/pods/3f53eb12-2c7d-4107-9d63-f0db8e983d90/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.208151 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" path="/var/lib/kubelet/pods/69f4262a-7eb3-4091-b103-393b9ab3a720/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.208913 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90360054-700b-4de8-9f51-f9b19cde50e0" path="/var/lib/kubelet/pods/90360054-700b-4de8-9f51-f9b19cde50e0/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.209433 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a" path="/var/lib/kubelet/pods/94dc0ebf-17e6-4c5a-9263-0fe98d7cc80a/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.210476 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99d95f08-663c-4443-9a16-459f02985879" path="/var/lib/kubelet/pods/99d95f08-663c-4443-9a16-459f02985879/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.211192 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4735f75-b279-4137-9496-36fd315fa8e8" path="/var/lib/kubelet/pods/b4735f75-b279-4137-9496-36fd315fa8e8/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.211674 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3baa7ac-9221-47e0-afb0-25715f0e2491" path="/var/lib/kubelet/pods/e3baa7ac-9221-47e0-afb0-25715f0e2491/volumes"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.217922 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-hnrhd"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.217964 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-6b40-account-create-update-bch2t"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.230974 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6774864d76-mfv42" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": dial tcp 10.217.0.158:9311: connect: connection refused"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.231107 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6774864d76-mfv42" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.158:9311/healthcheck\": dial tcp 10.217.0.158:9311: connect: connection refused"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.267530 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.267555 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01a8836d-ba47-44ef-995e-f5bf2227dcd4-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.273214 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-p24mk"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.294316 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-p24mk"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.305856 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance6413-account-delete-nckhw"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.313109 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-6413-account-create-update-xg5vm"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.324512 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-6413-account-create-update-xg5vm"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.361268 4940 generic.go:334] "Generic (PLEG): container finished" podID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerID="0ffb3bf39d8a1f37181283238b716fcbcfefeaecec4ce53764db5c4edb369cee" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.361368 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"79fc2ca6-8d13-44e6-83da-033c7f2d7df3","Type":"ContainerDied","Data":"0ffb3bf39d8a1f37181283238b716fcbcfefeaecec4ce53764db5c4edb369cee"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.366498 4940 generic.go:334] "Generic (PLEG): container finished" podID="3031ed6c-4ad3-4d47-a902-4a52bb40be6d" containerID="dc48ce55d4720539d72187d51174a10a64823a9c28973e4e5b3bfa0a2a2a6c23" exitCode=2
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.366568 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3031ed6c-4ad3-4d47-a902-4a52bb40be6d","Type":"ContainerDied","Data":"dc48ce55d4720539d72187d51174a10a64823a9c28973e4e5b3bfa0a2a2a6c23"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.378314 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi31fa-account-delete-fjlwz" event={"ID":"9abf09e3-dd14-42f4-8b1d-de23d9f0f218","Type":"ContainerStarted","Data":"d13a5ed30c21b3f298bcf01eebabe76fcf8233b02bfcb43d1775bcd1e984e502"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.380713 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.383997 4940 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novaapi31fa-account-delete-fjlwz" secret="" err="secret \"galera-openstack-dockercfg-f9gc5\" not found"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.406018 4940 generic.go:334] "Generic (PLEG): container finished" podID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerID="9a54934efee8d36f8fe7bb895361ca1c8eea974267d635a96b512ed26e1494b7" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.406068 4940 generic.go:334] "Generic (PLEG): container finished" podID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerID="54bb0d6665161c60a8df29f40f934d0511950d32bca2d3f2401e1f8db929d7db" exitCode=2
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.406136 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerDied","Data":"9a54934efee8d36f8fe7bb895361ca1c8eea974267d635a96b512ed26e1494b7"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.406160 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerDied","Data":"54bb0d6665161c60a8df29f40f934d0511950d32bca2d3f2401e1f8db929d7db"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.426430 4940 generic.go:334] "Generic (PLEG): container finished" podID="59926125-c1e1-4ac6-aa0e-2c4256046612" containerID="ff7cc02587d72ccd72860ca1a6ae9d091cb7065bd1a5b449325803e602ba872d" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.426567 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican2dca-account-delete-4kp2b" event={"ID":"59926125-c1e1-4ac6-aa0e-2c4256046612","Type":"ContainerDied","Data":"ff7cc02587d72ccd72860ca1a6ae9d091cb7065bd1a5b449325803e602ba872d"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.427008 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerName="galera" containerID="cri-o://e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47" gracePeriod=30
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.435616 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novaapi31fa-account-delete-fjlwz" podStartSLOduration=5.43558963 podStartE2EDuration="5.43558963s" podCreationTimestamp="2025-11-26 07:17:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:17:57.393919498 +0000 UTC m=+1378.914061127" watchObservedRunningTime="2025-11-26 07:17:57.43558963 +0000 UTC m=+1378.955731249"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.459502 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-bf77c95b9-864d6" event={"ID":"01a8836d-ba47-44ef-995e-f5bf2227dcd4","Type":"ContainerDied","Data":"94738a326a2732009ec6dee1d52ac3fd748e031b48f1b630d1b45377b3aabae2"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.459598 4940 scope.go:117] "RemoveContainer" containerID="8c41b0e8cdcb6ee2abbf477b7026854cbe03716deb4b56286f7d6da58714b16d"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.459877 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-bf77c95b9-864d6"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.473803 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0ee23-account-delete-qkbnr" event={"ID":"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a","Type":"ContainerStarted","Data":"8e24898a0ff112e87bf65f7928b22b6ce54972a826d301fced34470797356e71"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.474314 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data-custom\") pod \"71365c4a-71fa-4c50-9c71-b87510dcf548\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") "
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.474364 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-scripts\") pod \"71365c4a-71fa-4c50-9c71-b87510dcf548\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") "
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.474475 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71365c4a-71fa-4c50-9c71-b87510dcf548-etc-machine-id\") pod \"71365c4a-71fa-4c50-9c71-b87510dcf548\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") "
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.474523 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data\") pod \"71365c4a-71fa-4c50-9c71-b87510dcf548\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") "
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.474564 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-combined-ca-bundle\") pod \"71365c4a-71fa-4c50-9c71-b87510dcf548\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") "
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.474609 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6kg2\" (UniqueName: \"kubernetes.io/projected/71365c4a-71fa-4c50-9c71-b87510dcf548-kube-api-access-j6kg2\") pod \"71365c4a-71fa-4c50-9c71-b87510dcf548\" (UID: \"71365c4a-71fa-4c50-9c71-b87510dcf548\") "
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.475589 4940 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.475632 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts podName:9abf09e3-dd14-42f4-8b1d-de23d9f0f218 nodeName:}" failed. No retries permitted until 2025-11-26 07:17:57.975617099 +0000 UTC m=+1379.495758718 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts") pod "novaapi31fa-account-delete-fjlwz" (UID: "9abf09e3-dd14-42f4-8b1d-de23d9f0f218") : configmap "openstack-scripts" not found
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.475796 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/71365c4a-71fa-4c50-9c71-b87510dcf548-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "71365c4a-71fa-4c50-9c71-b87510dcf548" (UID: "71365c4a-71fa-4c50-9c71-b87510dcf548"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.481416 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-scripts" (OuterVolumeSpecName: "scripts") pod "71365c4a-71fa-4c50-9c71-b87510dcf548" (UID: "71365c4a-71fa-4c50-9c71-b87510dcf548"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.481600 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "71365c4a-71fa-4c50-9c71-b87510dcf548" (UID: "71365c4a-71fa-4c50-9c71-b87510dcf548"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.484249 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71365c4a-71fa-4c50-9c71-b87510dcf548-kube-api-access-j6kg2" (OuterVolumeSpecName: "kube-api-access-j6kg2") pod "71365c4a-71fa-4c50-9c71-b87510dcf548" (UID: "71365c4a-71fa-4c50-9c71-b87510dcf548"). InnerVolumeSpecName "kube-api-access-j6kg2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.490230 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": read tcp 10.217.0.2:39368->10.217.0.200:8775: read: connection reset by peer"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.490255 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": read tcp 10.217.0.2:39376->10.217.0.200:8775: read: connection reset by peer"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.492886 4940 generic.go:334] "Generic (PLEG): container finished" podID="4a6c9a20-06bc-43f8-aad9-fb5d72231110" containerID="0e5f1295bc9bd05f924d7ad125718c5325b29da946b426629ce0de7b927b2d74" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.493078 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-zzrpz"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.493098 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron2ea3-account-delete-7wj9l" event={"ID":"4a6c9a20-06bc-43f8-aad9-fb5d72231110","Type":"ContainerDied","Data":"0e5f1295bc9bd05f924d7ad125718c5325b29da946b426629ce0de7b927b2d74"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.539033 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-zzrpz"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.550924 4940 generic.go:334] "Generic (PLEG): container finished" podID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerID="240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.550996 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"71365c4a-71fa-4c50-9c71-b87510dcf548","Type":"ContainerDied","Data":"240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.551028 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"71365c4a-71fa-4c50-9c71-b87510dcf548","Type":"ContainerDied","Data":"35a80012c45a955a1127f9daf12c303909b9aafa34d91931ff69d8959b162c09"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.551103 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.559485 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.163:8776/healthcheck\": read tcp 10.217.0.2:45134->10.217.0.163:8776: read: connection reset by peer"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.565123 4940 generic.go:334] "Generic (PLEG): container finished" podID="eb19b71d-413f-46df-a509-7dc7aff75598" containerID="f1b02b23cdc5b3262b580dd8d36232c1e597b070d2f207d82ecf8dc91ab182c1" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.565209 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementf807-account-delete-wgr4g" event={"ID":"eb19b71d-413f-46df-a509-7dc7aff75598","Type":"ContainerDied","Data":"f1b02b23cdc5b3262b580dd8d36232c1e597b070d2f207d82ecf8dc91ab182c1"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.578296 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.578327 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.578336 4940 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71365c4a-71fa-4c50-9c71-b87510dcf548-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.578345 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6kg2\" (UniqueName: \"kubernetes.io/projected/71365c4a-71fa-4c50-9c71-b87510dcf548-kube-api-access-j6kg2\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.579682 4940 generic.go:334] "Generic (PLEG): container finished" podID="e67a9769-a54d-4b7a-ac04-dcbe4bc5662c" containerID="53d8c6b6abfceb5f620ed0f9437eb514b359bef4a1edb590086b6b445c8b72bf" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.579817 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder7f7c-account-delete-rkd24" event={"ID":"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c","Type":"ContainerDied","Data":"53d8c6b6abfceb5f620ed0f9437eb514b359bef4a1edb590086b6b445c8b72bf"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.595248 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "71365c4a-71fa-4c50-9c71-b87510dcf548" (UID: "71365c4a-71fa-4c50-9c71-b87510dcf548"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.597400 4940 generic.go:334] "Generic (PLEG): container finished" podID="35c6bf07-4770-450e-a19a-02323913fcd4" containerID="38ebe7a40f2302221b977bb730a4dafa3b829dbc717ce7001d421d16b83d3667" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.597496 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6774864d76-mfv42" event={"ID":"35c6bf07-4770-450e-a19a-02323913fcd4","Type":"ContainerDied","Data":"38ebe7a40f2302221b977bb730a4dafa3b829dbc717ce7001d421d16b83d3667"}
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.620537 4940 generic.go:334] "Generic (PLEG): container finished" podID="da80f9db-6be1-459c-9d61-ca1fc206d472" containerID="1396c22b993b0aca90db40c1aeddefa5385ff3803bd17b220761d0266e9bcfa2" exitCode=0
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.620600 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance6413-account-delete-nckhw" event={"ID":"da80f9db-6be1-459c-9d61-ca1fc206d472","Type":"ContainerDied","Data":"1396c22b993b0aca90db40c1aeddefa5385ff3803bd17b220761d0266e9bcfa2"}
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.643426 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.643567 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-2ea3-account-create-update-f9hmh"]
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.646832 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.650334 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.650409 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerName="galera"
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.669431 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="be9a79f6e9b952d84bd13f052112cb7a980f7370fa0b6edda5ce2ca0596c774c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.680622 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron2ea3-account-delete-7wj9l"]
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.684519 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="be9a79f6e9b952d84bd13f052112cb7a980f7370fa0b6edda5ce2ca0596c774c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.689242 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data" (OuterVolumeSpecName: "config-data") pod "71365c4a-71fa-4c50-9c71-b87510dcf548" (UID: "71365c4a-71fa-4c50-9c71-b87510dcf548"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.690909 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.690972 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71365c4a-71fa-4c50-9c71-b87510dcf548-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.729811 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-2ea3-account-create-update-f9hmh"]
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.745480 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="be9a79f6e9b952d84bd13f052112cb7a980f7370fa0b6edda5ce2ca0596c774c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 26 07:17:57 crc kubenswrapper[4940]: E1126 07:17:57.745552 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" containerName="nova-scheduler-scheduler"
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.754717 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-h6bf9"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.781088 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-h6bf9"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.788552 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-f807-account-create-update-7xz9t"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.810013 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-f807-account-create-update-7xz9t"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.828204 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementf807-account-delete-wgr4g"]
Nov 26 07:17:57 crc kubenswrapper[4940]: I1126 07:17:57.973113 4940 scope.go:117] "RemoveContainer" containerID="9c7f2f809a503ac5e53851c2de3c5ae678c6834ac46092d7cc5557057bdccf99"
Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.012294 4940 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.012356 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts podName:9abf09e3-dd14-42f4-8b1d-de23d9f0f218 nodeName:}" failed. No retries permitted until 2025-11-26 07:17:59.012341842 +0000 UTC m=+1380.532483461 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts") pod "novaapi31fa-account-delete-fjlwz" (UID: "9abf09e3-dd14-42f4-8b1d-de23d9f0f218") : configmap "openstack-scripts" not found
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.032414 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-sdhbb"]
Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.039515 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.044867 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-sdhbb"]
Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.047608 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.059595 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.059676 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="160032d4-a9c0-4b2c-be8b-f4a5c188c451" containerName="nova-cell1-conductor-conductor"
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.062560 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-bf77c95b9-864d6"]
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.077155 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder7f7c-account-delete-rkd24"]
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.084374 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-bf77c95b9-864d6"]
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.091739 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-7f7c-account-create-update-j9m8h"]
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.098335 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-7f7c-account-create-update-j9m8h"]
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.105746 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-gb562"]
Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.114387 4940
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-gb562"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.146734 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi31fa-account-delete-fjlwz"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.162440 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-31fa-account-create-update-vxmcw"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.180425 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-31fa-account-create-update-vxmcw"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.198563 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-pp2g5"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.219090 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-pp2g5"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.249637 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0ee23-account-delete-qkbnr"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.263780 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-ee23-account-create-update-j7nlg"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.265206 4940 scope.go:117] "RemoveContainer" containerID="f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.271563 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-ee23-account-create-update-j7nlg"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.276329 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.280863 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.282241 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.303197 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.315634 4940 scope.go:117] "RemoveContainer" containerID="240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.316180 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-public-tls-certs\") pod \"35c6bf07-4770-450e-a19a-02323913fcd4\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.316347 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-combined-ca-bundle\") pod \"35c6bf07-4770-450e-a19a-02323913fcd4\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.316424 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-internal-tls-certs\") pod \"35c6bf07-4770-450e-a19a-02323913fcd4\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.316449 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data\") pod \"35c6bf07-4770-450e-a19a-02323913fcd4\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.316547 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35c6bf07-4770-450e-a19a-02323913fcd4-logs\") pod \"35c6bf07-4770-450e-a19a-02323913fcd4\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.316631 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bp49\" (UniqueName: \"kubernetes.io/projected/35c6bf07-4770-450e-a19a-02323913fcd4-kube-api-access-2bp49\") pod \"35c6bf07-4770-450e-a19a-02323913fcd4\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.316693 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data-custom\") pod \"35c6bf07-4770-450e-a19a-02323913fcd4\" (UID: \"35c6bf07-4770-450e-a19a-02323913fcd4\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.317428 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.318534 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35c6bf07-4770-450e-a19a-02323913fcd4-logs" (OuterVolumeSpecName: "logs") pod "35c6bf07-4770-450e-a19a-02323913fcd4" (UID: "35c6bf07-4770-450e-a19a-02323913fcd4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.326513 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "35c6bf07-4770-450e-a19a-02323913fcd4" (UID: "35c6bf07-4770-450e-a19a-02323913fcd4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.327670 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35c6bf07-4770-450e-a19a-02323913fcd4-kube-api-access-2bp49" (OuterVolumeSpecName: "kube-api-access-2bp49") pod "35c6bf07-4770-450e-a19a-02323913fcd4" (UID: "35c6bf07-4770-450e-a19a-02323913fcd4"). InnerVolumeSpecName "kube-api-access-2bp49". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.338627 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.350001 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "35c6bf07-4770-450e-a19a-02323913fcd4" (UID: "35c6bf07-4770-450e-a19a-02323913fcd4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.365252 4940 scope.go:117] "RemoveContainer" containerID="f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca" Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.367227 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca\": container with ID starting with f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca not found: ID does not exist" containerID="f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.367283 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca"} err="failed to get container status \"f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca\": rpc error: code = NotFound desc = could not find container \"f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca\": container with ID starting with f33d483f6e6331c46dfecbe9bb66ddb37129da6d6d2e685a1f8b87f96d9857ca not found: ID does not exist" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.367316 4940 scope.go:117] "RemoveContainer" containerID="240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3" Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.370018 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3\": container with ID starting with 240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3 not found: ID does not exist" containerID="240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.370067 4940 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3"} err="failed to get container status \"240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3\": rpc error: code = NotFound desc = could not find container \"240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3\": container with ID starting with 240ff554ce90cfdf53e28920f11c6be9e00dd9b8c325dab71546a3d973db62e3 not found: ID does not exist" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.381157 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "35c6bf07-4770-450e-a19a-02323913fcd4" (UID: "35c6bf07-4770-450e-a19a-02323913fcd4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.397560 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data" (OuterVolumeSpecName: "config-data") pod "35c6bf07-4770-450e-a19a-02323913fcd4" (UID: "35c6bf07-4770-450e-a19a-02323913fcd4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.402971 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "35c6bf07-4770-450e-a19a-02323913fcd4" (UID: "35c6bf07-4770-450e-a19a-02323913fcd4"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.412710 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.414827 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.416284 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 26 07:17:58 crc kubenswrapper[4940]: E1126 07:17:58.416324 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="ovn-northd" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419336 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-combined-ca-bundle\") pod \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419403 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-combined-ca-bundle\") pod \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419444 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-config-data\") pod \"be43a059-c201-4bf3-92ac-304d58de4c02\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419479 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-scripts\") pod \"be43a059-c201-4bf3-92ac-304d58de4c02\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419502 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-logs\") pod \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419525 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-internal-tls-certs\") pod \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419554 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-config\") pod \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419580 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419603 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-config-data\") pod \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419653 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be43a059-c201-4bf3-92ac-304d58de4c02-logs\") pod \"be43a059-c201-4bf3-92ac-304d58de4c02\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419711 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9rql\" (UniqueName: \"kubernetes.io/projected/be43a059-c201-4bf3-92ac-304d58de4c02-kube-api-access-s9rql\") pod \"be43a059-c201-4bf3-92ac-304d58de4c02\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419778 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-public-tls-certs\") pod \"be43a059-c201-4bf3-92ac-304d58de4c02\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419824 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-internal-tls-certs\") pod \"be43a059-c201-4bf3-92ac-304d58de4c02\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419857 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-scripts\") pod \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.419881 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-httpd-run\") pod \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.420335 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9q82\" (UniqueName: 
\"kubernetes.io/projected/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-api-access-s9q82\") pod \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.420367 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwj7t\" (UniqueName: \"kubernetes.io/projected/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-kube-api-access-xwj7t\") pod \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\" (UID: \"79fc2ca6-8d13-44e6-83da-033c7f2d7df3\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.420408 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-certs\") pod \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\" (UID: \"3031ed6c-4ad3-4d47-a902-4a52bb40be6d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.420494 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-combined-ca-bundle\") pod \"be43a059-c201-4bf3-92ac-304d58de4c02\" (UID: \"be43a059-c201-4bf3-92ac-304d58de4c02\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.420970 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bp49\" (UniqueName: \"kubernetes.io/projected/35c6bf07-4770-450e-a19a-02323913fcd4-kube-api-access-2bp49\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.420996 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.421008 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.421020 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.421032 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.421061 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c6bf07-4770-450e-a19a-02323913fcd4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.421072 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35c6bf07-4770-450e-a19a-02323913fcd4-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.422873 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-logs" (OuterVolumeSpecName: "logs") pod "79fc2ca6-8d13-44e6-83da-033c7f2d7df3" (UID: "79fc2ca6-8d13-44e6-83da-033c7f2d7df3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.424845 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-scripts" (OuterVolumeSpecName: "scripts") pod "be43a059-c201-4bf3-92ac-304d58de4c02" (UID: "be43a059-c201-4bf3-92ac-304d58de4c02"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.425553 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "79fc2ca6-8d13-44e6-83da-033c7f2d7df3" (UID: "79fc2ca6-8d13-44e6-83da-033c7f2d7df3"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.426253 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be43a059-c201-4bf3-92ac-304d58de4c02-logs" (OuterVolumeSpecName: "logs") pod "be43a059-c201-4bf3-92ac-304d58de4c02" (UID: "be43a059-c201-4bf3-92ac-304d58de4c02"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.440217 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "79fc2ca6-8d13-44e6-83da-033c7f2d7df3" (UID: "79fc2ca6-8d13-44e6-83da-033c7f2d7df3"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.441058 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-api-access-s9q82" (OuterVolumeSpecName: "kube-api-access-s9q82") pod "3031ed6c-4ad3-4d47-a902-4a52bb40be6d" (UID: "3031ed6c-4ad3-4d47-a902-4a52bb40be6d"). InnerVolumeSpecName "kube-api-access-s9q82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.441103 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-kube-api-access-xwj7t" (OuterVolumeSpecName: "kube-api-access-xwj7t") pod "79fc2ca6-8d13-44e6-83da-033c7f2d7df3" (UID: "79fc2ca6-8d13-44e6-83da-033c7f2d7df3"). InnerVolumeSpecName "kube-api-access-xwj7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.459961 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be43a059-c201-4bf3-92ac-304d58de4c02-kube-api-access-s9rql" (OuterVolumeSpecName: "kube-api-access-s9rql") pod "be43a059-c201-4bf3-92ac-304d58de4c02" (UID: "be43a059-c201-4bf3-92ac-304d58de4c02"). InnerVolumeSpecName "kube-api-access-s9rql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.464127 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-scripts" (OuterVolumeSpecName: "scripts") pod "79fc2ca6-8d13-44e6-83da-033c7f2d7df3" (UID: "79fc2ca6-8d13-44e6-83da-033c7f2d7df3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.501498 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3031ed6c-4ad3-4d47-a902-4a52bb40be6d" (UID: "3031ed6c-4ad3-4d47-a902-4a52bb40be6d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524540 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524581 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524594 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9q82\" (UniqueName: \"kubernetes.io/projected/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-api-access-s9q82\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524673 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwj7t\" (UniqueName: \"kubernetes.io/projected/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-kube-api-access-xwj7t\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524685 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524702 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524714 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524741 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524753 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be43a059-c201-4bf3-92ac-304d58de4c02-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.524764 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9rql\" (UniqueName: \"kubernetes.io/projected/be43a059-c201-4bf3-92ac-304d58de4c02-kube-api-access-s9rql\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.542024 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79fc2ca6-8d13-44e6-83da-033c7f2d7df3" (UID: "79fc2ca6-8d13-44e6-83da-033c7f2d7df3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.576200 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.627673 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-config-data" (OuterVolumeSpecName: "config-data") pod "be43a059-c201-4bf3-92ac-304d58de4c02" (UID: "be43a059-c201-4bf3-92ac-304d58de4c02"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.631385 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.631429 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.631440 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.634030 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be43a059-c201-4bf3-92ac-304d58de4c02" (UID: "be43a059-c201-4bf3-92ac-304d58de4c02"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.642312 4940 generic.go:334] "Generic (PLEG): container finished" podID="96cf8cc9-e0ee-4aa2-9842-83d45e24c46a" containerID="8e24898a0ff112e87bf65f7928b22b6ce54972a826d301fced34470797356e71" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.642534 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0ee23-account-delete-qkbnr" event={"ID":"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a","Type":"ContainerDied","Data":"8e24898a0ff112e87bf65f7928b22b6ce54972a826d301fced34470797356e71"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.647541 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"79fc2ca6-8d13-44e6-83da-033c7f2d7df3","Type":"ContainerDied","Data":"eddd25e5f77c7035bd5cc3a39e05cc12ff82a824b564d897201f5481a00bb890"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.647591 4940 scope.go:117] "RemoveContainer" containerID="0ffb3bf39d8a1f37181283238b716fcbcfefeaecec4ce53764db5c4edb369cee" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.647678 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.651830 4940 generic.go:334] "Generic (PLEG): container finished" podID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerID="62fc80217f3946465f0f5d0d83b59bf3c3a8c7cea1b4b8754c8843a8b67e8e47" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.651893 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"310cee15-c1c5-4db3-8900-ca3107ba130d","Type":"ContainerDied","Data":"62fc80217f3946465f0f5d0d83b59bf3c3a8c7cea1b4b8754c8843a8b67e8e47"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.652272 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.655685 4940 generic.go:334] "Generic (PLEG): container finished" podID="be43a059-c201-4bf3-92ac-304d58de4c02" containerID="473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.655762 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-65d858fd7b-dbln9" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.655800 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65d858fd7b-dbln9" event={"ID":"be43a059-c201-4bf3-92ac-304d58de4c02","Type":"ContainerDied","Data":"473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.655840 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65d858fd7b-dbln9" event={"ID":"be43a059-c201-4bf3-92ac-304d58de4c02","Type":"ContainerDied","Data":"6ac3e0c055131f84f050948ddc5f680dcdceafc6cce25235724219cbc66221e1"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.658247 4940 generic.go:334] "Generic (PLEG): container finished" podID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerID="6ec52072d9ea19666daec0c37b2fb57fbc8d500a0f552cf60f1fb0ea6a25ed06" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.658374 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" event={"ID":"2952bb7b-f134-4f55-969b-e30cd8fbe53c","Type":"ContainerDied","Data":"6ec52072d9ea19666daec0c37b2fb57fbc8d500a0f552cf60f1fb0ea6a25ed06"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.660891 4940 generic.go:334] "Generic (PLEG): container finished" podID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerID="b2273f1e55c5a69eb78cfb44375ac989afcca3b119c94705b8c9afb07c4525d3" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.660956 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6db949d4cf-kdv49" event={"ID":"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16","Type":"ContainerDied","Data":"b2273f1e55c5a69eb78cfb44375ac989afcca3b119c94705b8c9afb07c4525d3"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.662533 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.664303 4940 generic.go:334] "Generic (PLEG): container finished" podID="9abf09e3-dd14-42f4-8b1d-de23d9f0f218" containerID="d13a5ed30c21b3f298bcf01eebabe76fcf8233b02bfcb43d1775bcd1e984e502" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.664478 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi31fa-account-delete-fjlwz" event={"ID":"9abf09e3-dd14-42f4-8b1d-de23d9f0f218","Type":"ContainerDied","Data":"d13a5ed30c21b3f298bcf01eebabe76fcf8233b02bfcb43d1775bcd1e984e502"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.669065 4940 generic.go:334] "Generic (PLEG): container finished" podID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerID="1b14a7d61d2e90b0760d7d58727eb7a9fea2999afab2a7de8b185771cfd9eea1" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.669126 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf","Type":"ContainerDied","Data":"1b14a7d61d2e90b0760d7d58727eb7a9fea2999afab2a7de8b185771cfd9eea1"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.669196 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.682077 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "3031ed6c-4ad3-4d47-a902-4a52bb40be6d" (UID: "3031ed6c-4ad3-4d47-a902-4a52bb40be6d"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.686487 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-config-data" (OuterVolumeSpecName: "config-data") pod "79fc2ca6-8d13-44e6-83da-033c7f2d7df3" (UID: "79fc2ca6-8d13-44e6-83da-033c7f2d7df3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.704655 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6774864d76-mfv42" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.705256 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6774864d76-mfv42" event={"ID":"35c6bf07-4770-450e-a19a-02323913fcd4","Type":"ContainerDied","Data":"72c8c6f4510b5b0a577fe2f7a8284d7c6465608e72797f12cc46ae0118ece23b"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.715100 4940 generic.go:334] "Generic (PLEG): container finished" podID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerID="5fcae63004fc694c125bd0895a86667cfe1b52745e785eba586d24f5dff5b67d" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.715165 4940 generic.go:334] "Generic (PLEG): container finished" podID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerID="aa12214a40e17b396195f7c3910988635f60c08b39041e13aa87c7d6cc3c84be" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.715213 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerDied","Data":"5fcae63004fc694c125bd0895a86667cfe1b52745e785eba586d24f5dff5b67d"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.715237 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerDied","Data":"aa12214a40e17b396195f7c3910988635f60c08b39041e13aa87c7d6cc3c84be"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.717094 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.732890 4940 scope.go:117] "RemoveContainer" containerID="8233c7cbc4acce8bdbeebbef4a2bf8d2190310e50e6de48733f430fc0c6cf042" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.733140 4940 generic.go:334] "Generic (PLEG): container finished" podID="6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" containerID="be9a79f6e9b952d84bd13f052112cb7a980f7370fa0b6edda5ce2ca0596c774c" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.733188 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d","Type":"ContainerDied","Data":"be9a79f6e9b952d84bd13f052112cb7a980f7370fa0b6edda5ce2ca0596c774c"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.744076 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.744105 4940 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.744117 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.756429 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.759494 4940 generic.go:334] "Generic (PLEG): container finished" podID="69f67fa7-ea74-4966-b69c-ab547896057e" containerID="f62286900f59e04e8b5935f72d22351db72e1e90c13dfd3c197b2183c1ddd5c0" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.759562 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"69f67fa7-ea74-4966-b69c-ab547896057e","Type":"ContainerDied","Data":"f62286900f59e04e8b5935f72d22351db72e1e90c13dfd3c197b2183c1ddd5c0"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.772066 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.775615 4940 generic.go:334] "Generic (PLEG): container finished" podID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerID="cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.775650 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ff45362c-e19e-470b-9b67-a1c2d6385ba9","Type":"ContainerDied","Data":"cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.780061 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3031ed6c-4ad3-4d47-a902-4a52bb40be6d","Type":"ContainerDied","Data":"892023edda3e99bb3d18e095752c48347362d756dd0436445985e339ced62864"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.780127 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.780729 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "3031ed6c-4ad3-4d47-a902-4a52bb40be6d" (UID: "3031ed6c-4ad3-4d47-a902-4a52bb40be6d"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.781034 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.796959 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "79fc2ca6-8d13-44e6-83da-033c7f2d7df3" (UID: "79fc2ca6-8d13-44e6-83da-033c7f2d7df3"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.800135 4940 generic.go:334] "Generic (PLEG): container finished" podID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerID="3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1" exitCode=0 Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.800348 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "be43a059-c201-4bf3-92ac-304d58de4c02" (UID: "be43a059-c201-4bf3-92ac-304d58de4c02"). 
InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.800382 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.800471 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"63d173d6-a7cc-42f3-806d-50b9c8f8b189","Type":"ContainerDied","Data":"3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1"} Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.815679 4940 scope.go:117] "RemoveContainer" containerID="473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846381 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-config-data\") pod \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846708 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-combined-ca-bundle\") pod \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846735 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data\") pod \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846774 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddp4z\" (UniqueName: \"kubernetes.io/projected/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-kube-api-access-ddp4z\") pod \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846796 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data-custom\") pod \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846837 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-config-data\") pod \"310cee15-c1c5-4db3-8900-ca3107ba130d\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846875 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-public-tls-certs\") pod \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846923 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h75zl\" (UniqueName: \"kubernetes.io/projected/310cee15-c1c5-4db3-8900-ca3107ba130d-kube-api-access-h75zl\") pod \"310cee15-c1c5-4db3-8900-ca3107ba130d\" (UID: 
\"310cee15-c1c5-4db3-8900-ca3107ba130d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.846987 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjzgt\" (UniqueName: \"kubernetes.io/projected/2952bb7b-f134-4f55-969b-e30cd8fbe53c-kube-api-access-kjzgt\") pod \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847017 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-internal-tls-certs\") pod \"310cee15-c1c5-4db3-8900-ca3107ba130d\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847054 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-logs\") pod \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847106 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847151 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-scripts\") pod \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847180 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-public-tls-certs\") pod \"310cee15-c1c5-4db3-8900-ca3107ba130d\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847222 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-combined-ca-bundle\") pod \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847256 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-combined-ca-bundle\") pod \"310cee15-c1c5-4db3-8900-ca3107ba130d\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847296 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-httpd-run\") pod \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\" (UID: \"605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847340 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2952bb7b-f134-4f55-969b-e30cd8fbe53c-logs\") pod \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\" (UID: \"2952bb7b-f134-4f55-969b-e30cd8fbe53c\") " Nov 26 07:17:58 crc kubenswrapper[4940]: 
I1126 07:17:58.847376 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/310cee15-c1c5-4db3-8900-ca3107ba130d-logs\") pod \"310cee15-c1c5-4db3-8900-ca3107ba130d\" (UID: \"310cee15-c1c5-4db3-8900-ca3107ba130d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847853 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/79fc2ca6-8d13-44e6-83da-033c7f2d7df3-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847869 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.847878 4940 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/3031ed6c-4ad3-4d47-a902-4a52bb40be6d-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.852533 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/310cee15-c1c5-4db3-8900-ca3107ba130d-logs" (OuterVolumeSpecName: "logs") pod "310cee15-c1c5-4db3-8900-ca3107ba130d" (UID: "310cee15-c1c5-4db3-8900-ca3107ba130d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.859543 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "be43a059-c201-4bf3-92ac-304d58de4c02" (UID: "be43a059-c201-4bf3-92ac-304d58de4c02"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.860103 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" (UID: "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.860970 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2952bb7b-f134-4f55-969b-e30cd8fbe53c-logs" (OuterVolumeSpecName: "logs") pod "2952bb7b-f134-4f55-969b-e30cd8fbe53c" (UID: "2952bb7b-f134-4f55-969b-e30cd8fbe53c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.865372 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-logs" (OuterVolumeSpecName: "logs") pod "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" (UID: "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.869012 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-scripts" (OuterVolumeSpecName: "scripts") pod "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" (UID: "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.869832 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2952bb7b-f134-4f55-969b-e30cd8fbe53c-kube-api-access-kjzgt" (OuterVolumeSpecName: "kube-api-access-kjzgt") pod "2952bb7b-f134-4f55-969b-e30cd8fbe53c" (UID: "2952bb7b-f134-4f55-969b-e30cd8fbe53c"). InnerVolumeSpecName "kube-api-access-kjzgt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.869879 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-kube-api-access-ddp4z" (OuterVolumeSpecName: "kube-api-access-ddp4z") pod "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" (UID: "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf"). InnerVolumeSpecName "kube-api-access-ddp4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.870112 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.908763 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2952bb7b-f134-4f55-969b-e30cd8fbe53c" (UID: "2952bb7b-f134-4f55-969b-e30cd8fbe53c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.920581 4940 scope.go:117] "RemoveContainer" containerID="26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.937673 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/310cee15-c1c5-4db3-8900-ca3107ba130d-kube-api-access-h75zl" (OuterVolumeSpecName: "kube-api-access-h75zl") pod "310cee15-c1c5-4db3-8900-ca3107ba130d" (UID: "310cee15-c1c5-4db3-8900-ca3107ba130d"). InnerVolumeSpecName "kube-api-access-h75zl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.946882 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6774864d76-mfv42"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948485 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-combined-ca-bundle\") pod \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948545 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5pnls\" (UniqueName: \"kubernetes.io/projected/63d173d6-a7cc-42f3-806d-50b9c8f8b189-kube-api-access-5pnls\") pod \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948619 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ff45362c-e19e-470b-9b67-a1c2d6385ba9-etc-machine-id\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948662 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-config-data\") pod \"5ac14fd0-8273-436b-89b8-a1478aaa226d\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948703 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-run-httpd\") pod \"5ac14fd0-8273-436b-89b8-a1478aaa226d\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948750 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-ceilometer-tls-certs\") pod \"5ac14fd0-8273-436b-89b8-a1478aaa226d\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948794 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-combined-ca-bundle\") pod \"5ac14fd0-8273-436b-89b8-a1478aaa226d\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948829 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6q4jw\" (UniqueName: \"kubernetes.io/projected/5ac14fd0-8273-436b-89b8-a1478aaa226d-kube-api-access-6q4jw\") pod \"5ac14fd0-8273-436b-89b8-a1478aaa226d\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948873 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-scripts\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948899 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff45362c-e19e-470b-9b67-a1c2d6385ba9-logs\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948923 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data-custom\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948948 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-log-httpd\") pod \"5ac14fd0-8273-436b-89b8-a1478aaa226d\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948968 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-public-tls-certs\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.948994 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-nova-metadata-tls-certs\") pod \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.949014 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krpxk\" (UniqueName: \"kubernetes.io/projected/ff45362c-e19e-470b-9b67-a1c2d6385ba9-kube-api-access-krpxk\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.949742 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-sg-core-conf-yaml\") pod \"5ac14fd0-8273-436b-89b8-a1478aaa226d\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.949797 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-scripts\") pod \"5ac14fd0-8273-436b-89b8-a1478aaa226d\" (UID: \"5ac14fd0-8273-436b-89b8-a1478aaa226d\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.949824 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.949844 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63d173d6-a7cc-42f3-806d-50b9c8f8b189-logs\") pod \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.949881 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-config-data\") pod \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\" (UID: \"63d173d6-a7cc-42f3-806d-50b9c8f8b189\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.949933 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-combined-ca-bundle\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.949967 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-internal-tls-certs\") pod \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\" (UID: \"ff45362c-e19e-470b-9b67-a1c2d6385ba9\") " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.953947 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff45362c-e19e-470b-9b67-a1c2d6385ba9-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.956189 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5ac14fd0-8273-436b-89b8-a1478aaa226d" (UID: "5ac14fd0-8273-436b-89b8-a1478aaa226d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.956292 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" (UID: "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.956656 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff45362c-e19e-470b-9b67-a1c2d6385ba9-logs" (OuterVolumeSpecName: "logs") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.957778 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d173d6-a7cc-42f3-806d-50b9c8f8b189-logs" (OuterVolumeSpecName: "logs") pod "63d173d6-a7cc-42f3-806d-50b9c8f8b189" (UID: "63d173d6-a7cc-42f3-806d-50b9c8f8b189"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.959393 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5ac14fd0-8273-436b-89b8-a1478aaa226d" (UID: "5ac14fd0-8273-436b-89b8-a1478aaa226d"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.961520 4940 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ff45362c-e19e-470b-9b67-a1c2d6385ba9-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.973782 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6774864d76-mfv42"] Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974186 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974230 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974248 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2952bb7b-f134-4f55-969b-e30cd8fbe53c-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974274 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff45362c-e19e-470b-9b67-a1c2d6385ba9-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974289 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/310cee15-c1c5-4db3-8900-ca3107ba130d-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974303 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5ac14fd0-8273-436b-89b8-a1478aaa226d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974321 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63d173d6-a7cc-42f3-806d-50b9c8f8b189-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974341 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddp4z\" (UniqueName: \"kubernetes.io/projected/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-kube-api-access-ddp4z\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974362 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974378 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h75zl\" (UniqueName: \"kubernetes.io/projected/310cee15-c1c5-4db3-8900-ca3107ba130d-kube-api-access-h75zl\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974393 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjzgt\" (UniqueName: \"kubernetes.io/projected/2952bb7b-f134-4f55-969b-e30cd8fbe53c-kube-api-access-kjzgt\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974415 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974430 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/be43a059-c201-4bf3-92ac-304d58de4c02-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974476 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.974494 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:58 crc kubenswrapper[4940]: I1126 07:17:58.998207 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff45362c-e19e-470b-9b67-a1c2d6385ba9-kube-api-access-krpxk" (OuterVolumeSpecName: "kube-api-access-krpxk") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "kube-api-access-krpxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.016319 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-scripts" (OuterVolumeSpecName: "scripts") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.018696 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-scripts" (OuterVolumeSpecName: "scripts") pod "5ac14fd0-8273-436b-89b8-a1478aaa226d" (UID: "5ac14fd0-8273-436b-89b8-a1478aaa226d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.043459 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ac14fd0-8273-436b-89b8-a1478aaa226d-kube-api-access-6q4jw" (OuterVolumeSpecName: "kube-api-access-6q4jw") pod "5ac14fd0-8273-436b-89b8-a1478aaa226d" (UID: "5ac14fd0-8273-436b-89b8-a1478aaa226d"). InnerVolumeSpecName "kube-api-access-6q4jw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.046819 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-config-data" (OuterVolumeSpecName: "config-data") pod "310cee15-c1c5-4db3-8900-ca3107ba130d" (UID: "310cee15-c1c5-4db3-8900-ca3107ba130d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.076394 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63d173d6-a7cc-42f3-806d-50b9c8f8b189-kube-api-access-5pnls" (OuterVolumeSpecName: "kube-api-access-5pnls") pod "63d173d6-a7cc-42f3-806d-50b9c8f8b189" (UID: "63d173d6-a7cc-42f3-806d-50b9c8f8b189"). InnerVolumeSpecName "kube-api-access-5pnls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.076630 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data-custom\") pod \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.076721 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-combined-ca-bundle\") pod \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.076813 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sq6z2\" (UniqueName: \"kubernetes.io/projected/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-kube-api-access-sq6z2\") pod \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.076858 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data\") pod \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.076894 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-logs\") pod \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\" (UID: \"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.078345 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2952bb7b-f134-4f55-969b-e30cd8fbe53c" (UID: "2952bb7b-f134-4f55-969b-e30cd8fbe53c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.079781 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-logs" (OuterVolumeSpecName: "logs") pod "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" (UID: "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.080151 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.082906 4940 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.082965 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts podName:9abf09e3-dd14-42f4-8b1d-de23d9f0f218 nodeName:}" failed. No retries permitted until 2025-11-26 07:18:01.082943597 +0000 UTC m=+1382.603085216 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts") pod "novaapi31fa-account-delete-fjlwz" (UID: "9abf09e3-dd14-42f4-8b1d-de23d9f0f218") : configmap "openstack-scripts" not found Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.087017 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5pnls\" (UniqueName: \"kubernetes.io/projected/63d173d6-a7cc-42f3-806d-50b9c8f8b189-kube-api-access-5pnls\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.090572 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.090588 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6q4jw\" (UniqueName: \"kubernetes.io/projected/5ac14fd0-8273-436b-89b8-a1478aaa226d-kube-api-access-6q4jw\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.090615 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.090626 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.090637 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krpxk\" (UniqueName: \"kubernetes.io/projected/ff45362c-e19e-470b-9b67-a1c2d6385ba9-kube-api-access-krpxk\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.090647 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.090671 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.090682 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.113982 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-kube-api-access-sq6z2" (OuterVolumeSpecName: 
"kube-api-access-sq6z2") pod "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" (UID: "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16"). InnerVolumeSpecName "kube-api-access-sq6z2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.123894 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" (UID: "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.167135 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "310cee15-c1c5-4db3-8900-ca3107ba130d" (UID: "310cee15-c1c5-4db3-8900-ca3107ba130d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.176337 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5ac14fd0-8273-436b-89b8-a1478aaa226d" (UID: "5ac14fd0-8273-436b-89b8-a1478aaa226d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.205599 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" (UID: "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.207012 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.207176 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.207246 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.207319 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.207397 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sq6z2\" (UniqueName: \"kubernetes.io/projected/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-kube-api-access-sq6z2\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.222087 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" path="/var/lib/kubelet/pods/01a8836d-ba47-44ef-995e-f5bf2227dcd4/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.222812 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1" path="/var/lib/kubelet/pods/1f9f3ed0-d8b5-41b4-be9a-982f5f8cfde1/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.223432 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b" path="/var/lib/kubelet/pods/2fa5aea3-ec9f-41d6-b1e7-19912ab7c20b/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.224939 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="326ec868-d611-435e-9dd2-769dc279c6c5" path="/var/lib/kubelet/pods/326ec868-d611-435e-9dd2-769dc279c6c5/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.225753 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33462c2c-29fb-487a-8f23-db40ba07be25" path="/var/lib/kubelet/pods/33462c2c-29fb-487a-8f23-db40ba07be25/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.226582 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" path="/var/lib/kubelet/pods/35c6bf07-4770-450e-a19a-02323913fcd4/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.229423 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39eba712-649b-4509-be2d-72bb08e292e5" path="/var/lib/kubelet/pods/39eba712-649b-4509-be2d-72bb08e292e5/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.230006 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50646a66-7d38-481f-9f16-e33c0de6ac84" path="/var/lib/kubelet/pods/50646a66-7d38-481f-9f16-e33c0de6ac84/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.230665 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="57dd2efa-d7e3-4ea2-98be-7cdb13472a59" path="/var/lib/kubelet/pods/57dd2efa-d7e3-4ea2-98be-7cdb13472a59/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.240323 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" path="/var/lib/kubelet/pods/71365c4a-71fa-4c50-9c71-b87510dcf548/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.241263 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ad2bcd9-0999-4e06-84d1-aed4c51e4edd" path="/var/lib/kubelet/pods/7ad2bcd9-0999-4e06-84d1-aed4c51e4edd/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.244368 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95b33e62-98cb-4ebe-8fa9-e1e762ee3352" path="/var/lib/kubelet/pods/95b33e62-98cb-4ebe-8fa9-e1e762ee3352/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.244626 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-config-data" (OuterVolumeSpecName: "config-data") pod "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" (UID: "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.250126 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.252809 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data" (OuterVolumeSpecName: "config-data") pod "2952bb7b-f134-4f55-969b-e30cd8fbe53c" (UID: "2952bb7b-f134-4f55-969b-e30cd8fbe53c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.261128 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8fb2242-083d-4a26-957a-0c4386c582c2" path="/var/lib/kubelet/pods/a8fb2242-083d-4a26-957a-0c4386c582c2/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.264455 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af1db786-f3c4-4881-bd70-8be92ec0b24a" path="/var/lib/kubelet/pods/af1db786-f3c4-4881-bd70-8be92ec0b24a/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.292048 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7a24b6b-c7c6-4710-a0a9-1f2730c7c333" path="/var/lib/kubelet/pods/d7a24b6b-c7c6-4710-a0a9-1f2730c7c333/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.293339 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b0f8bd-b95a-4d10-8747-11c0586a710c" path="/var/lib/kubelet/pods/f4b0f8bd-b95a-4d10-8747-11c0586a710c/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.294061 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f867029a-5e81-436e-82ed-d8c3cef5b734" path="/var/lib/kubelet/pods/f867029a-5e81-436e-82ed-d8c3cef5b734/volumes" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.309121 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" (UID: "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.309517 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/memcached-0" podUID="69f67fa7-ea74-4966-b69c-ab547896057e" containerName="memcached" probeResult="failure" output="dial tcp 10.217.0.105:11211: connect: connection refused" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.310271 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.310309 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.310833 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.310859 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2952bb7b-f134-4f55-969b-e30cd8fbe53c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.341735 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "310cee15-c1c5-4db3-8900-ca3107ba130d" (UID: "310cee15-c1c5-4db3-8900-ca3107ba130d"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.366967 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" (UID: "605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.379842 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "63d173d6-a7cc-42f3-806d-50b9c8f8b189" (UID: "63d173d6-a7cc-42f3-806d-50b9c8f8b189"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.385470 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.420882 4940 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.420931 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.420941 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.421596 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data" (OuterVolumeSpecName: "config-data") pod "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" (UID: "ce424ac9-8f67-48d5-9cd6-3274e3f9bc16"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.430388 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wx6d7"] Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.430812 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerName="glance-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.430835 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerName="glance-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.430851 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerName="glance-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.430862 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerName="glance-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.430880 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.430889 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-log" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.430907 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerName="cinder-scheduler" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.430915 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerName="cinder-scheduler" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.430929 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerName="barbican-keystone-listener-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.430937 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerName="barbican-keystone-listener-log" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.430951 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a428662-7264-4abe-837b-64739810c829" containerName="dnsmasq-dns" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.430958 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a428662-7264-4abe-837b-64739810c829" containerName="dnsmasq-dns" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.430974 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.430981 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.430994 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431001 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431017 4940 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="e3baa7ac-9221-47e0-afb0-25715f0e2491" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431024 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3baa7ac-9221-47e0-afb0-25715f0e2491" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431056 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-metadata" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431065 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-metadata" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431079 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" containerName="galera" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431086 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" containerName="galera" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431099 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerName="proxy-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431107 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerName="proxy-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431122 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" containerName="placement-api" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431129 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" containerName="placement-api" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431138 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" containerName="mysql-bootstrap" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431145 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" containerName="mysql-bootstrap" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431156 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="ovsdbserver-sb" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431195 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="ovsdbserver-sb" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431211 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="ceilometer-central-agent" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431219 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="ceilometer-central-agent" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431237 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a428662-7264-4abe-837b-64739810c829" containerName="init" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431245 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a428662-7264-4abe-837b-64739810c829" containerName="init" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431256 4940 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerName="proxy-server" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431263 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerName="proxy-server" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431272 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerName="barbican-keystone-listener" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431280 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerName="barbican-keystone-listener" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431295 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerName="probe" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431303 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerName="probe" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431317 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431324 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431336 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerName="barbican-worker" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431345 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerName="barbican-worker" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431354 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="proxy-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431362 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="proxy-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431377 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431384 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431398 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerName="glance-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431407 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerName="glance-log" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431421 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-api" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431428 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-api" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431437 4940 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="90360054-700b-4de8-9f51-f9b19cde50e0" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431446 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="90360054-700b-4de8-9f51-f9b19cde50e0" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431460 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99d95f08-663c-4443-9a16-459f02985879" containerName="ovsdbserver-nb" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431467 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="99d95f08-663c-4443-9a16-459f02985879" containerName="ovsdbserver-nb" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431478 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" containerName="placement-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431487 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" containerName="placement-log" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431502 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431510 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431526 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerName="barbican-worker-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431534 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerName="barbican-worker-log" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431543 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99d95f08-663c-4443-9a16-459f02985879" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431551 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="99d95f08-663c-4443-9a16-459f02985879" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431563 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="ceilometer-notification-agent" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431572 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="ceilometer-notification-agent" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431583 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="sg-core" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431590 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="sg-core" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431604 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431611 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api" Nov 26 07:17:59 
crc kubenswrapper[4940]: E1126 07:17:59.431621 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3031ed6c-4ad3-4d47-a902-4a52bb40be6d" containerName="kube-state-metrics" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431629 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3031ed6c-4ad3-4d47-a902-4a52bb40be6d" containerName="kube-state-metrics" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431638 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96cf8cc9-e0ee-4aa2-9842-83d45e24c46a" containerName="mariadb-account-delete" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431646 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="96cf8cc9-e0ee-4aa2-9842-83d45e24c46a" containerName="mariadb-account-delete" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.431656 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerName="glance-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431664 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerName="glance-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431859 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerName="barbican-keystone-listener-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431873 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerName="probe" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431885 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="99d95f08-663c-4443-9a16-459f02985879" containerName="ovsdbserver-nb" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431894 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="ceilometer-central-agent" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431906 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-api" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431919 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="sg-core" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431929 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerName="barbican-worker-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431943 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="71365c4a-71fa-4c50-9c71-b87510dcf548" containerName="cinder-scheduler" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431957 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" containerName="barbican-keystone-listener" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431968 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a428662-7264-4abe-837b-64739810c829" containerName="dnsmasq-dns" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431982 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.431995 4940 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432004 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432016 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-metadata" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432026 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="proxy-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432075 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" containerName="nova-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432091 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerName="glance-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432100 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" containerName="glance-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432109 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69f4262a-7eb3-4091-b103-393b9ab3a720" containerName="ovsdbserver-sb" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432120 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c6bf07-4770-450e-a19a-02323913fcd4" containerName="barbican-api-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432132 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f53eb12-2c7d-4107-9d63-f0db8e983d90" containerName="galera" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432146 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="99d95f08-663c-4443-9a16-459f02985879" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432155 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="3031ed6c-4ad3-4d47-a902-4a52bb40be6d" containerName="kube-state-metrics" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432166 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" containerName="placement-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432176 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3baa7ac-9221-47e0-afb0-25715f0e2491" containerName="openstack-network-exporter" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432184 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" containerName="placement-api" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432196 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerName="glance-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432206 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" containerName="barbican-worker" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432213 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" containerName="ceilometer-notification-agent" 
Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432222 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" containerName="glance-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432235 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="90360054-700b-4de8-9f51-f9b19cde50e0" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432245 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="96cf8cc9-e0ee-4aa2-9842-83d45e24c46a" containerName="mariadb-account-delete" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432253 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerName="proxy-httpd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432264 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="01a8836d-ba47-44ef-995e-f5bf2227dcd4" containerName="proxy-server" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432278 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" containerName="nova-metadata-log" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.432291 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" containerName="cinder-api" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.433987 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wx6d7"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.434114 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.445147 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "310cee15-c1c5-4db3-8900-ca3107ba130d" (UID: "310cee15-c1c5-4db3-8900-ca3107ba130d"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.446525 4940 scope.go:117] "RemoveContainer" containerID="473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.460123 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649\": container with ID starting with 473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649 not found: ID does not exist" containerID="473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.460354 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649"} err="failed to get container status \"473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649\": rpc error: code = NotFound desc = could not find container \"473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649\": container with ID starting with 473af5ba1befe7266945f46774d79bc50378f104cabd28317521a30b2b458649 not found: ID does not exist" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.460439 4940 scope.go:117] "RemoveContainer" containerID="26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.492301 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860\": container with ID starting with 26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860 not found: ID does not exist" containerID="26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.492351 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860"} err="failed to get container status \"26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860\": rpc error: code = NotFound desc = could not find container \"26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860\": container with ID starting with 26860b18dae51863c6fcb586390568b94c19ac08b1b4734caed88b7793668860 not found: ID does not exist" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.492384 4940 scope.go:117] "RemoveContainer" containerID="1b14a7d61d2e90b0760d7d58727eb7a9fea2999afab2a7de8b185771cfd9eea1" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.492566 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5d7f54fb65-6vndb" podUID="0a428662-7264-4abe-837b-64739810c829" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.195:5353: i/o timeout" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.522110 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9fsf\" (UniqueName: \"kubernetes.io/projected/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-kube-api-access-m9fsf\") pod \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\" (UID: \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.522157 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-operator-scripts\") pod \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\" (UID: \"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.522510 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.522525 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/310cee15-c1c5-4db3-8900-ca3107ba130d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.522867 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "96cf8cc9-e0ee-4aa2-9842-83d45e24c46a" (UID: "96cf8cc9-e0ee-4aa2-9842-83d45e24c46a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.532671 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-65d858fd7b-dbln9"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.545630 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-config-data" (OuterVolumeSpecName: "config-data") pod "5ac14fd0-8273-436b-89b8-a1478aaa226d" (UID: "5ac14fd0-8273-436b-89b8-a1478aaa226d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.547640 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-kube-api-access-m9fsf" (OuterVolumeSpecName: "kube-api-access-m9fsf") pod "96cf8cc9-e0ee-4aa2-9842-83d45e24c46a" (UID: "96cf8cc9-e0ee-4aa2-9842-83d45e24c46a"). InnerVolumeSpecName "kube-api-access-m9fsf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.560801 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-65d858fd7b-dbln9"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.569021 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.584737 4940 scope.go:117] "RemoveContainer" containerID="5d1a5be719119c7cfea6def8095a88a5e3c62d7b6d09bd5c94f113735004fb3f" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.591129 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.599105 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.609823 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.611295 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-config-data" (OuterVolumeSpecName: "config-data") pod "63d173d6-a7cc-42f3-806d-50b9c8f8b189" (UID: "63d173d6-a7cc-42f3-806d-50b9c8f8b189"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624053 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-catalog-content\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624108 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfnpd\" (UniqueName: \"kubernetes.io/projected/3cc67a97-ea67-4814-a822-9a81d093db45-kube-api-access-gfnpd\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624261 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-utilities\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624428 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9fsf\" (UniqueName: \"kubernetes.io/projected/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-kube-api-access-m9fsf\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624443 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624455 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624468 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624481 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.624549 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.624601 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data podName:69972749-03ff-48e9-b031-99c33ce86e96 nodeName:}" failed. No retries permitted until 2025-11-26 07:18:07.624583696 +0000 UTC m=+1389.144725315 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data") pod "rabbitmq-cell1-server-0" (UID: "69972749-03ff-48e9-b031-99c33ce86e96") : configmap "rabbitmq-cell1-config-data" not found Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.624992 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.633230 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63d173d6-a7cc-42f3-806d-50b9c8f8b189" (UID: "63d173d6-a7cc-42f3-806d-50b9c8f8b189"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.641606 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.644211 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.665269 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e146e6ba7d8a0d8f7e78795f75427c31b1e5c239738564750f702ee38a6b5f61" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.665477 4940 scope.go:117] "RemoveContainer" containerID="38ebe7a40f2302221b977bb730a4dafa3b829dbc717ce7001d421d16b83d3667" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.665683 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.667837 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e146e6ba7d8a0d8f7e78795f75427c31b1e5c239738564750f702ee38a6b5f61" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.672328 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e146e6ba7d8a0d8f7e78795f75427c31b1e5c239738564750f702ee38a6b5f61" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.672447 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="f93321ef-2519-4bc0-b3d1-a45194267ca6" containerName="nova-cell0-conductor-conductor" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.728931 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-catalog-content\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.729111 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfnpd\" (UniqueName: \"kubernetes.io/projected/3cc67a97-ea67-4814-a822-9a81d093db45-kube-api-access-gfnpd\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.729230 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-utilities\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.729370 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.729474 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63d173d6-a7cc-42f3-806d-50b9c8f8b189-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.729491 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-catalog-content\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.729867 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-utilities\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.729891 4940 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 07:17:59 crc kubenswrapper[4940]: E1126 07:17:59.730024 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data podName:11a17576-9a94-4e2d-8915-9d838de09f0b nodeName:}" failed. No retries permitted until 2025-11-26 07:18:07.730008649 +0000 UTC m=+1389.250150258 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data") pod "rabbitmq-server-0" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b") : configmap "rabbitmq-config-data" not found Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.730429 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.763148 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.769993 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfnpd\" (UniqueName: \"kubernetes.io/projected/3cc67a97-ea67-4814-a822-9a81d093db45-kube-api-access-gfnpd\") pod \"community-operators-wx6d7\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.775508 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.781082 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.788055 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.793427 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.808964 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.830055 4940 scope.go:117] "RemoveContainer" containerID="af6d07c01a8f45e7b8f9c9d2cebf33c57841077f225e3827093818e8a7e717c3" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.830988 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdjtw\" (UniqueName: \"kubernetes.io/projected/eb19b71d-413f-46df-a509-7dc7aff75598-kube-api-access-kdjtw\") pod \"eb19b71d-413f-46df-a509-7dc7aff75598\" (UID: \"eb19b71d-413f-46df-a509-7dc7aff75598\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.831141 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k22mj\" (UniqueName: \"kubernetes.io/projected/4a6c9a20-06bc-43f8-aad9-fb5d72231110-kube-api-access-k22mj\") pod \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\" (UID: \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.831295 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-operator-scripts\") pod \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\" (UID: \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.831448 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a6c9a20-06bc-43f8-aad9-fb5d72231110-operator-scripts\") pod \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\" (UID: \"4a6c9a20-06bc-43f8-aad9-fb5d72231110\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.831649 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da80f9db-6be1-459c-9d61-ca1fc206d472-operator-scripts\") pod \"da80f9db-6be1-459c-9d61-ca1fc206d472\" (UID: \"da80f9db-6be1-459c-9d61-ca1fc206d472\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.831765 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x87dv\" (UniqueName: \"kubernetes.io/projected/da80f9db-6be1-459c-9d61-ca1fc206d472-kube-api-access-x87dv\") pod \"da80f9db-6be1-459c-9d61-ca1fc206d472\" (UID: \"da80f9db-6be1-459c-9d61-ca1fc206d472\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.832223 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e67a9769-a54d-4b7a-ac04-dcbe4bc5662c" (UID: "e67a9769-a54d-4b7a-ac04-dcbe4bc5662c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.832472 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron2ea3-account-delete-7wj9l" event={"ID":"4a6c9a20-06bc-43f8-aad9-fb5d72231110","Type":"ContainerDied","Data":"2601733878ca9b8e42d76c0a11edd9715f18a3944030e68357da86382a6bf2a3"} Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.832514 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2601733878ca9b8e42d76c0a11edd9715f18a3944030e68357da86382a6bf2a3" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.832580 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron2ea3-account-delete-7wj9l" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.834740 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a6c9a20-06bc-43f8-aad9-fb5d72231110-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4a6c9a20-06bc-43f8-aad9-fb5d72231110" (UID: "4a6c9a20-06bc-43f8-aad9-fb5d72231110"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.834971 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb19b71d-413f-46df-a509-7dc7aff75598-operator-scripts\") pod \"eb19b71d-413f-46df-a509-7dc7aff75598\" (UID: \"eb19b71d-413f-46df-a509-7dc7aff75598\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.835250 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6rsd\" (UniqueName: \"kubernetes.io/projected/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-kube-api-access-c6rsd\") pod \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\" (UID: \"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.835769 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da80f9db-6be1-459c-9d61-ca1fc206d472-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "da80f9db-6be1-459c-9d61-ca1fc206d472" (UID: "da80f9db-6be1-459c-9d61-ca1fc206d472"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.836512 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb19b71d-413f-46df-a509-7dc7aff75598-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eb19b71d-413f-46df-a509-7dc7aff75598" (UID: "eb19b71d-413f-46df-a509-7dc7aff75598"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.838304 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb19b71d-413f-46df-a509-7dc7aff75598-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.838454 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.838564 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a6c9a20-06bc-43f8-aad9-fb5d72231110-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.838708 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da80f9db-6be1-459c-9d61-ca1fc206d472-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.842642 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-kube-api-access-c6rsd" (OuterVolumeSpecName: "kube-api-access-c6rsd") pod "e67a9769-a54d-4b7a-ac04-dcbe4bc5662c" (UID: "e67a9769-a54d-4b7a-ac04-dcbe4bc5662c"). InnerVolumeSpecName "kube-api-access-c6rsd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.844929 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb19b71d-413f-46df-a509-7dc7aff75598-kube-api-access-kdjtw" (OuterVolumeSpecName: "kube-api-access-kdjtw") pod "eb19b71d-413f-46df-a509-7dc7aff75598" (UID: "eb19b71d-413f-46df-a509-7dc7aff75598"). InnerVolumeSpecName "kube-api-access-kdjtw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.846885 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ff45362c-e19e-470b-9b67-a1c2d6385ba9","Type":"ContainerDied","Data":"0d358721ff987af6c7566e52a9dd59e0ed7fb881e692d8dcdb35b8b6cf753163"} Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.847009 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.851680 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.861317 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6db949d4cf-kdv49" event={"ID":"ce424ac9-8f67-48d5-9cd6-3274e3f9bc16","Type":"ContainerDied","Data":"a19eca65076752b981a7e05d304ffa4d8d838c9ceecd139bca4ceb2e7c58740e"} Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.861810 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6db949d4cf-kdv49" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.865214 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da80f9db-6be1-459c-9d61-ca1fc206d472-kube-api-access-x87dv" (OuterVolumeSpecName: "kube-api-access-x87dv") pod "da80f9db-6be1-459c-9d61-ca1fc206d472" (UID: "da80f9db-6be1-459c-9d61-ca1fc206d472"). InnerVolumeSpecName "kube-api-access-x87dv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.869580 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a6c9a20-06bc-43f8-aad9-fb5d72231110-kube-api-access-k22mj" (OuterVolumeSpecName: "kube-api-access-k22mj") pod "4a6c9a20-06bc-43f8-aad9-fb5d72231110" (UID: "4a6c9a20-06bc-43f8-aad9-fb5d72231110"). InnerVolumeSpecName "kube-api-access-k22mj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.869814 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.869818 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7864499f6b-p77t2" event={"ID":"2952bb7b-f134-4f55-969b-e30cd8fbe53c","Type":"ContainerDied","Data":"90fc2753869b4009b04c00b5efa8ee4e5f02f38c56a8cbea54f72dbe16c3696c"} Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.886152 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementf807-account-delete-wgr4g" event={"ID":"eb19b71d-413f-46df-a509-7dc7aff75598","Type":"ContainerDied","Data":"4d5b8ae7cec5c94611367b827bd086868f59549c03572a3e35c8f269b50a6d8c"} Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.886237 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d5b8ae7cec5c94611367b827bd086868f59549c03572a3e35c8f269b50a6d8c" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.886417 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementf807-account-delete-wgr4g" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.895290 4940 scope.go:117] "RemoveContainer" containerID="cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.923674 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.940400 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7rmr\" (UniqueName: \"kubernetes.io/projected/160032d4-a9c0-4b2c-be8b-f4a5c188c451-kube-api-access-d7rmr\") pod \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.940537 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59926125-c1e1-4ac6-aa0e-2c4256046612-operator-scripts\") pod \"59926125-c1e1-4ac6-aa0e-2c4256046612\" (UID: \"59926125-c1e1-4ac6-aa0e-2c4256046612\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.940577 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-combined-ca-bundle\") pod \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.940617 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-config-data\") pod \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\" (UID: \"160032d4-a9c0-4b2c-be8b-f4a5c188c451\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.940664 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzrr4\" (UniqueName: \"kubernetes.io/projected/59926125-c1e1-4ac6-aa0e-2c4256046612-kube-api-access-fzrr4\") pod \"59926125-c1e1-4ac6-aa0e-2c4256046612\" (UID: \"59926125-c1e1-4ac6-aa0e-2c4256046612\") " Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.941000 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k22mj\" (UniqueName: \"kubernetes.io/projected/4a6c9a20-06bc-43f8-aad9-fb5d72231110-kube-api-access-k22mj\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.941011 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x87dv\" (UniqueName: \"kubernetes.io/projected/da80f9db-6be1-459c-9d61-ca1fc206d472-kube-api-access-x87dv\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.941020 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6rsd\" (UniqueName: \"kubernetes.io/projected/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c-kube-api-access-c6rsd\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.941029 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdjtw\" (UniqueName: \"kubernetes.io/projected/eb19b71d-413f-46df-a509-7dc7aff75598-kube-api-access-kdjtw\") on node \"crc\" DevicePath \"\"" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.942282 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7864499f6b-p77t2"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.949115 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-7864499f6b-p77t2"] Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.950519 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.955198 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59926125-c1e1-4ac6-aa0e-2c4256046612-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "59926125-c1e1-4ac6-aa0e-2c4256046612" (UID: "59926125-c1e1-4ac6-aa0e-2c4256046612"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.959215 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59926125-c1e1-4ac6-aa0e-2c4256046612-kube-api-access-fzrr4" (OuterVolumeSpecName: "kube-api-access-fzrr4") pod "59926125-c1e1-4ac6-aa0e-2c4256046612" (UID: "59926125-c1e1-4ac6-aa0e-2c4256046612"). InnerVolumeSpecName "kube-api-access-fzrr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.959707 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ac14fd0-8273-436b-89b8-a1478aaa226d" (UID: "5ac14fd0-8273-436b-89b8-a1478aaa226d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:17:59 crc kubenswrapper[4940]: I1126 07:17:59.998675 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/160032d4-a9c0-4b2c-be8b-f4a5c188c451-kube-api-access-d7rmr" (OuterVolumeSpecName: "kube-api-access-d7rmr") pod "160032d4-a9c0-4b2c-be8b-f4a5c188c451" (UID: "160032d4-a9c0-4b2c-be8b-f4a5c188c451"). InnerVolumeSpecName "kube-api-access-d7rmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.005389 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "160032d4-a9c0-4b2c-be8b-f4a5c188c451" (UID: "160032d4-a9c0-4b2c-be8b-f4a5c188c451"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.005916 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"310cee15-c1c5-4db3-8900-ca3107ba130d","Type":"ContainerDied","Data":"be27ab6856f9259db0d079afe6e4d935a9c48401811e84fa8b80fc98686c9af4"} Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.006030 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.025262 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder7f7c-account-delete-rkd24" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.030737 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder7f7c-account-delete-rkd24" event={"ID":"e67a9769-a54d-4b7a-ac04-dcbe4bc5662c","Type":"ContainerDied","Data":"1dff2bb7fa3b12cd5b4e6d018c62e67ce2a2a4cecdd2961b0f1c3f7bea262fe5"} Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.030834 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1dff2bb7fa3b12cd5b4e6d018c62e67ce2a2a4cecdd2961b0f1c3f7bea262fe5" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.045901 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.045945 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59926125-c1e1-4ac6-aa0e-2c4256046612-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.045959 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.045972 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.045989 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzrr4\" (UniqueName: \"kubernetes.io/projected/59926125-c1e1-4ac6-aa0e-2c4256046612-kube-api-access-fzrr4\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.046002 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7rmr\" (UniqueName: \"kubernetes.io/projected/160032d4-a9c0-4b2c-be8b-f4a5c188c451-kube-api-access-d7rmr\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.046201 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "5ac14fd0-8273-436b-89b8-a1478aaa226d" (UID: "5ac14fd0-8273-436b-89b8-a1478aaa226d"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.048952 4940 generic.go:334] "Generic (PLEG): container finished" podID="160032d4-a9c0-4b2c-be8b-f4a5c188c451" containerID="af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731" exitCode=0 Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.049066 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"160032d4-a9c0-4b2c-be8b-f4a5c188c451","Type":"ContainerDied","Data":"af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731"} Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.049272 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.067905 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data" (OuterVolumeSpecName: "config-data") pod "ff45362c-e19e-470b-9b67-a1c2d6385ba9" (UID: "ff45362c-e19e-470b-9b67-a1c2d6385ba9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.096090 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance6413-account-delete-nckhw" event={"ID":"da80f9db-6be1-459c-9d61-ca1fc206d472","Type":"ContainerDied","Data":"679a6b9824ac88ddfe86a5b89034695430ae6a0d125833692c2d6d5edec98f14"} Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.096217 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance6413-account-delete-nckhw" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.098591 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-config-data" (OuterVolumeSpecName: "config-data") pod "160032d4-a9c0-4b2c-be8b-f4a5c188c451" (UID: "160032d4-a9c0-4b2c-be8b-f4a5c188c451"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.117800 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5ac14fd0-8273-436b-89b8-a1478aaa226d","Type":"ContainerDied","Data":"4d764f9bbe565c918e44127af2ec29d02577a905188f2cf547aa6a2e8d122bce"} Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.117921 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.123172 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0ee23-account-delete-qkbnr" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.123370 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0ee23-account-delete-qkbnr" event={"ID":"96cf8cc9-e0ee-4aa2-9842-83d45e24c46a","Type":"ContainerDied","Data":"3a35fcf450a2778c7357bf8d33f90b0f5e57f498b80f86c9155aafd80eb0ecff"} Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.124155 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a35fcf450a2778c7357bf8d33f90b0f5e57f498b80f86c9155aafd80eb0ecff" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.146675 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/160032d4-a9c0-4b2c-be8b-f4a5c188c451-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.146710 4940 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5ac14fd0-8273-436b-89b8-a1478aaa226d-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.146723 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff45362c-e19e-470b-9b67-a1c2d6385ba9-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.310157 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.322930 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.349786 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6tgd\" (UniqueName: \"kubernetes.io/projected/69f67fa7-ea74-4966-b69c-ab547896057e-kube-api-access-k6tgd\") pod \"69f67fa7-ea74-4966-b69c-ab547896057e\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.349855 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-combined-ca-bundle\") pod \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.349893 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-kolla-config\") pod \"69f67fa7-ea74-4966-b69c-ab547896057e\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.349930 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-combined-ca-bundle\") pod \"69f67fa7-ea74-4966-b69c-ab547896057e\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.349966 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7g2fk\" (UniqueName: \"kubernetes.io/projected/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-kube-api-access-7g2fk\") pod \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.350000 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-config-data\") pod \"69f67fa7-ea74-4966-b69c-ab547896057e\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.350022 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-memcached-tls-certs\") pod \"69f67fa7-ea74-4966-b69c-ab547896057e\" (UID: \"69f67fa7-ea74-4966-b69c-ab547896057e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.350136 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-config-data\") pod \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\" (UID: \"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.353016 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-config-data" (OuterVolumeSpecName: "config-data") pod "69f67fa7-ea74-4966-b69c-ab547896057e" (UID: "69f67fa7-ea74-4966-b69c-ab547896057e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.353675 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "69f67fa7-ea74-4966-b69c-ab547896057e" (UID: "69f67fa7-ea74-4966-b69c-ab547896057e"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.375385 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron2ea3-account-delete-7wj9l"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.376094 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69f67fa7-ea74-4966-b69c-ab547896057e-kube-api-access-k6tgd" (OuterVolumeSpecName: "kube-api-access-k6tgd") pod "69f67fa7-ea74-4966-b69c-ab547896057e" (UID: "69f67fa7-ea74-4966-b69c-ab547896057e"). InnerVolumeSpecName "kube-api-access-k6tgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.389701 4940 scope.go:117] "RemoveContainer" containerID="f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.395553 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron2ea3-account-delete-7wj9l"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.403954 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-kube-api-access-7g2fk" (OuterVolumeSpecName: "kube-api-access-7g2fk") pod "6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" (UID: "6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d"). InnerVolumeSpecName "kube-api-access-7g2fk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.404915 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6db949d4cf-kdv49"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.407903 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69f67fa7-ea74-4966-b69c-ab547896057e" (UID: "69f67fa7-ea74-4966-b69c-ab547896057e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.412937 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-6db949d4cf-kdv49"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.419438 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" (UID: "6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.448305 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-config-data" (OuterVolumeSpecName: "config-data") pod "6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" (UID: "6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.451835 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.451864 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6tgd\" (UniqueName: \"kubernetes.io/projected/69f67fa7-ea74-4966-b69c-ab547896057e-kube-api-access-k6tgd\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.451877 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.451891 4940 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.451902 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.451910 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7g2fk\" (UniqueName: \"kubernetes.io/projected/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d-kube-api-access-7g2fk\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.451918 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69f67fa7-ea74-4966-b69c-ab547896057e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.458235 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.458299 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.468502 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance6413-account-delete-nckhw"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.487619 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance6413-account-delete-nckhw"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.487828 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "69f67fa7-ea74-4966-b69c-ab547896057e" (UID: "69f67fa7-ea74-4966-b69c-ab547896057e"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.503416 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementf807-account-delete-wgr4g"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.519798 4940 scope.go:117] "RemoveContainer" containerID="dc48ce55d4720539d72187d51174a10a64823a9c28973e4e5b3bfa0a2a2a6c23" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.529461 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placementf807-account-delete-wgr4g"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.538613 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0ee23-account-delete-qkbnr"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.546350 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0ee23-account-delete-qkbnr"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.553997 4940 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/69f67fa7-ea74-4966-b69c-ab547896057e-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.554011 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder7f7c-account-delete-rkd24"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.562174 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder7f7c-account-delete-rkd24"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.569588 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.577067 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.584660 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.594944 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.605989 4940 scope.go:117] "RemoveContainer" containerID="3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.614260 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.614913 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.615618 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.619845 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound 
desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.619893 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.620173 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.622816 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.625684 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.625740 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.630505 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.639318 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.643142 4940 scope.go:117] "RemoveContainer" containerID="31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.698264 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wx6d7"] Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.699445 4940 scope.go:117] "RemoveContainer" containerID="cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd" Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.699966 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd\": container with ID starting with cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd not found: ID does not exist" containerID="cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.700004 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd"} err="failed to get container status \"cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd\": rpc error: code = NotFound desc = could not find container \"cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd\": container with ID starting with cecf61563fd078333b349029afd0a936b9dda5d88171b20eff272fcf61398cfd not found: ID does not exist" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.700048 4940 scope.go:117] "RemoveContainer" containerID="f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594" Nov 26 07:18:00 crc kubenswrapper[4940]: E1126 07:18:00.700414 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594\": container with ID starting with f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594 not found: ID does not exist" containerID="f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.700437 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594"} err="failed to get container status \"f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594\": rpc error: code = NotFound desc = could not find container \"f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594\": container with ID starting with f1559817d18ac0702b5459705c871239903ad5b214d9f2a88eb19e1aad92e594 not found: ID does not exist" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.700453 4940 scope.go:117] "RemoveContainer" containerID="b2273f1e55c5a69eb78cfb44375ac989afcca3b119c94705b8c9afb07c4525d3" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.759296 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts\") pod \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\" (UID: \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.759696 4940 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-z4bff\" (UniqueName: \"kubernetes.io/projected/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-kube-api-access-z4bff\") pod \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\" (UID: \"9abf09e3-dd14-42f4-8b1d-de23d9f0f218\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.760348 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9abf09e3-dd14-42f4-8b1d-de23d9f0f218" (UID: "9abf09e3-dd14-42f4-8b1d-de23d9f0f218"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.764635 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-kube-api-access-z4bff" (OuterVolumeSpecName: "kube-api-access-z4bff") pod "9abf09e3-dd14-42f4-8b1d-de23d9f0f218" (UID: "9abf09e3-dd14-42f4-8b1d-de23d9f0f218"). InnerVolumeSpecName "kube-api-access-z4bff". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: W1126 07:18:00.780583 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cc67a97_ea67_4814_a822_9a81d093db45.slice/crio-8fa957f45c7767d2c85334c147fc0f3f36c5625c6e496cb02b98297774a96b01 WatchSource:0}: Error finding container 8fa957f45c7767d2c85334c147fc0f3f36c5625c6e496cb02b98297774a96b01: Status 404 returned error can't find the container with id 8fa957f45c7767d2c85334c147fc0f3f36c5625c6e496cb02b98297774a96b01 Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.781610 4940 scope.go:117] "RemoveContainer" containerID="68f96383a43cb6661fc9011d58327bf045a794abb89b6d697dff06c631c6488d" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.802008 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-78r7g" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" containerName="ovn-controller" probeResult="failure" output="command timed out" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.814548 4940 scope.go:117] "RemoveContainer" containerID="6ec52072d9ea19666daec0c37b2fb57fbc8d500a0f552cf60f1fb0ea6a25ed06" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.818837 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.837465 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.852837 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-78r7g" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" containerName="ovn-controller" probeResult="failure" output=< Nov 26 07:18:00 crc kubenswrapper[4940]: ERROR - Failed to get connection status from ovn-controller, ovn-appctl exit status: 0 Nov 26 07:18:00 crc kubenswrapper[4940]: > Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.860623 4940 scope.go:117] "RemoveContainer" containerID="4bf744e80f9468639290e0122be55a99e227ce639f923bfca4bfba0a9c767cde" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.860995 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-plugins\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861027 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-tls\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861058 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-erlang-cookie\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861082 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/11a17576-9a94-4e2d-8915-9d838de09f0b-erlang-cookie-secret\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861097 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-server-conf\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861136 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861219 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-galera-tls-certs\") pod \"a6c56309-82af-4734-a3d4-6c203fd5b23e\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861239 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-default\") pod \"a6c56309-82af-4734-a3d4-6c203fd5b23e\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " Nov 26 07:18:00 crc 
kubenswrapper[4940]: I1126 07:18:00.861310 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861359 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llk2t\" (UniqueName: \"kubernetes.io/projected/a6c56309-82af-4734-a3d4-6c203fd5b23e-kube-api-access-llk2t\") pod \"a6c56309-82af-4734-a3d4-6c203fd5b23e\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861384 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-operator-scripts\") pod \"a6c56309-82af-4734-a3d4-6c203fd5b23e\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861409 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"a6c56309-82af-4734-a3d4-6c203fd5b23e\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861471 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ct48l\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-kube-api-access-ct48l\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861536 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-kolla-config\") pod \"a6c56309-82af-4734-a3d4-6c203fd5b23e\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861556 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-plugins-conf\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861578 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/11a17576-9a94-4e2d-8915-9d838de09f0b-pod-info\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861597 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-confd\") pod \"11a17576-9a94-4e2d-8915-9d838de09f0b\" (UID: \"11a17576-9a94-4e2d-8915-9d838de09f0b\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861617 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-combined-ca-bundle\") pod \"a6c56309-82af-4734-a3d4-6c203fd5b23e\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861638 4940 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-generated\") pod \"a6c56309-82af-4734-a3d4-6c203fd5b23e\" (UID: \"a6c56309-82af-4734-a3d4-6c203fd5b23e\") " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861888 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.861901 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4bff\" (UniqueName: \"kubernetes.io/projected/9abf09e3-dd14-42f4-8b1d-de23d9f0f218-kube-api-access-z4bff\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.862359 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "a6c56309-82af-4734-a3d4-6c203fd5b23e" (UID: "a6c56309-82af-4734-a3d4-6c203fd5b23e"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.864731 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "a6c56309-82af-4734-a3d4-6c203fd5b23e" (UID: "a6c56309-82af-4734-a3d4-6c203fd5b23e"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.865715 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6c56309-82af-4734-a3d4-6c203fd5b23e-kube-api-access-llk2t" (OuterVolumeSpecName: "kube-api-access-llk2t") pod "a6c56309-82af-4734-a3d4-6c203fd5b23e" (UID: "a6c56309-82af-4734-a3d4-6c203fd5b23e"). InnerVolumeSpecName "kube-api-access-llk2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.865838 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a6c56309-82af-4734-a3d4-6c203fd5b23e" (UID: "a6c56309-82af-4734-a3d4-6c203fd5b23e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.866241 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.867333 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11a17576-9a94-4e2d-8915-9d838de09f0b-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.867306 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.867918 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.868464 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.869219 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/11a17576-9a94-4e2d-8915-9d838de09f0b-pod-info" (OuterVolumeSpecName: "pod-info") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.877399 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "a6c56309-82af-4734-a3d4-6c203fd5b23e" (UID: "a6c56309-82af-4734-a3d4-6c203fd5b23e"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.892638 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-kube-api-access-ct48l" (OuterVolumeSpecName: "kube-api-access-ct48l") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "kube-api-access-ct48l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.892709 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.904180 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "mysql-db") pod "a6c56309-82af-4734-a3d4-6c203fd5b23e" (UID: "a6c56309-82af-4734-a3d4-6c203fd5b23e"). InnerVolumeSpecName "local-storage09-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.963576 4940 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.963806 4940 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.963881 4940 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/11a17576-9a94-4e2d-8915-9d838de09f0b-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.963962 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.964079 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.964182 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.964380 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.964505 4940 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/11a17576-9a94-4e2d-8915-9d838de09f0b-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.964606 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.964720 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.964859 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llk2t\" (UniqueName: \"kubernetes.io/projected/a6c56309-82af-4734-a3d4-6c203fd5b23e-kube-api-access-llk2t\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.964986 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6c56309-82af-4734-a3d4-6c203fd5b23e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.965077 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node 
\"crc\" " Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.965153 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ct48l\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-kube-api-access-ct48l\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.983873 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 26 07:18:00 crc kubenswrapper[4940]: I1126 07:18:00.999415 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.066110 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.066334 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.142315 4940 scope.go:117] "RemoveContainer" containerID="62fc80217f3946465f0f5d0d83b59bf3c3a8c7cea1b4b8754c8843a8b67e8e47" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.142949 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data" (OuterVolumeSpecName: "config-data") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.147648 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6c56309-82af-4734-a3d4-6c203fd5b23e" (UID: "a6c56309-82af-4734-a3d4-6c203fd5b23e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.157962 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican2dca-account-delete-4kp2b" event={"ID":"59926125-c1e1-4ac6-aa0e-2c4256046612","Type":"ContainerDied","Data":"62bcb5e1a9b9b67e4686267c3741b8d746921c8ea6d5b584a6ece17ed7a7472b"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.158405 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="62bcb5e1a9b9b67e4686267c3741b8d746921c8ea6d5b584a6ece17ed7a7472b" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.158588 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican2dca-account-delete-4kp2b" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.171009 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.171053 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.171602 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "a6c56309-82af-4734-a3d4-6c203fd5b23e" (UID: "a6c56309-82af-4734-a3d4-6c203fd5b23e"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.175545 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.179843 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.189502 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_cda3e3c5-7a68-4269-8c15-b463b9263805/ovn-northd/0.log" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.189556 4940 generic.go:334] "Generic (PLEG): container finished" podID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerID="12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" exitCode=139 Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.193166 4940 generic.go:334] "Generic (PLEG): container finished" podID="69972749-03ff-48e9-b031-99c33ce86e96" containerID="ffc8d224e6ee06035af2a49a3dfbb96ff41fdb30dc4fc3b71983a00df2b005c0" exitCode=0 Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.195621 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="160032d4-a9c0-4b2c-be8b-f4a5c188c451" path="/var/lib/kubelet/pods/160032d4-a9c0-4b2c-be8b-f4a5c188c451/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.196618 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2952bb7b-f134-4f55-969b-e30cd8fbe53c" path="/var/lib/kubelet/pods/2952bb7b-f134-4f55-969b-e30cd8fbe53c/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.197407 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3031ed6c-4ad3-4d47-a902-4a52bb40be6d" path="/var/lib/kubelet/pods/3031ed6c-4ad3-4d47-a902-4a52bb40be6d/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.198847 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="310cee15-c1c5-4db3-8900-ca3107ba130d" path="/var/lib/kubelet/pods/310cee15-c1c5-4db3-8900-ca3107ba130d/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.200114 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-server-conf" (OuterVolumeSpecName: "server-conf") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.200439 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a6c9a20-06bc-43f8-aad9-fb5d72231110" path="/var/lib/kubelet/pods/4a6c9a20-06bc-43f8-aad9-fb5d72231110/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.202319 4940 generic.go:334] "Generic (PLEG): container finished" podID="77513168-a1ea-4794-a859-b942b0e9c262" containerID="91bd7e89a1d7d0eb89a61cc1bbe9824e672e433b9e8a1ae66449fbaea8335372" exitCode=0 Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.214255 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ac14fd0-8273-436b-89b8-a1478aaa226d" path="/var/lib/kubelet/pods/5ac14fd0-8273-436b-89b8-a1478aaa226d/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.215506 4940 generic.go:334] "Generic (PLEG): container finished" podID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerID="ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445" exitCode=0 Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.215640 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.217084 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf" path="/var/lib/kubelet/pods/605a7fbf-cd1f-4e44-a85c-0c1a7f1b87cf/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.222415 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi31fa-account-delete-fjlwz" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.228171 4940 generic.go:334] "Generic (PLEG): container finished" podID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerID="e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47" exitCode=0 Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.233532 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.235423 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63d173d6-a7cc-42f3-806d-50b9c8f8b189" path="/var/lib/kubelet/pods/63d173d6-a7cc-42f3-806d-50b9c8f8b189/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.236017 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79fc2ca6-8d13-44e6-83da-033c7f2d7df3" path="/var/lib/kubelet/pods/79fc2ca6-8d13-44e6-83da-033c7f2d7df3/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.243186 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "11a17576-9a94-4e2d-8915-9d838de09f0b" (UID: "11a17576-9a94-4e2d-8915-9d838de09f0b"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.248241 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96cf8cc9-e0ee-4aa2-9842-83d45e24c46a" path="/var/lib/kubelet/pods/96cf8cc9-e0ee-4aa2-9842-83d45e24c46a/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.248771 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be43a059-c201-4bf3-92ac-304d58de4c02" path="/var/lib/kubelet/pods/be43a059-c201-4bf3-92ac-304d58de4c02/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.249339 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce424ac9-8f67-48d5-9cd6-3274e3f9bc16" path="/var/lib/kubelet/pods/ce424ac9-8f67-48d5-9cd6-3274e3f9bc16/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.260986 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da80f9db-6be1-459c-9d61-ca1fc206d472" path="/var/lib/kubelet/pods/da80f9db-6be1-459c-9d61-ca1fc206d472/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.273716 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/11a17576-9a94-4e2d-8915-9d838de09f0b-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.274002 4940 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/11a17576-9a94-4e2d-8915-9d838de09f0b-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.274023 4940 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6c56309-82af-4734-a3d4-6c203fd5b23e-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.275183 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.276418 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e67a9769-a54d-4b7a-ac04-dcbe4bc5662c" path="/var/lib/kubelet/pods/e67a9769-a54d-4b7a-ac04-dcbe4bc5662c/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.277800 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb19b71d-413f-46df-a509-7dc7aff75598" path="/var/lib/kubelet/pods/eb19b71d-413f-46df-a509-7dc7aff75598/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.280100 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff45362c-e19e-470b-9b67-a1c2d6385ba9" path="/var/lib/kubelet/pods/ff45362c-e19e-470b-9b67-a1c2d6385ba9/volumes" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281784 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d","Type":"ContainerDied","Data":"3d68a4704d1d1d84d4a553f17d4619e94408f53609aec09a490bc06ed5381141"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281813 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"69f67fa7-ea74-4966-b69c-ab547896057e","Type":"ContainerDied","Data":"96daa4d06959246193e4e52966cb1077a7a8319911905a3cea480a0d594ab40e"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281827 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cda3e3c5-7a68-4269-8c15-b463b9263805","Type":"ContainerDied","Data":"12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281843 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"69972749-03ff-48e9-b031-99c33ce86e96","Type":"ContainerDied","Data":"ffc8d224e6ee06035af2a49a3dfbb96ff41fdb30dc4fc3b71983a00df2b005c0"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281856 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6f4459f4df-b92xj" event={"ID":"77513168-a1ea-4794-a859-b942b0e9c262","Type":"ContainerDied","Data":"91bd7e89a1d7d0eb89a61cc1bbe9824e672e433b9e8a1ae66449fbaea8335372"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281868 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"11a17576-9a94-4e2d-8915-9d838de09f0b","Type":"ContainerDied","Data":"ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281878 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"11a17576-9a94-4e2d-8915-9d838de09f0b","Type":"ContainerDied","Data":"97f14370f1831386762b36af2b7bf5f590662167ed2c659b48ed755255c404c4"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281887 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi31fa-account-delete-fjlwz" event={"ID":"9abf09e3-dd14-42f4-8b1d-de23d9f0f218","Type":"ContainerDied","Data":"de9f45f3fb0f077cbe530ea44f2493e6f12c4d95e52c74bb0598fd4b309aa534"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281898 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a6c56309-82af-4734-a3d4-6c203fd5b23e","Type":"ContainerDied","Data":"e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47"} Nov 26 
07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281908 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"a6c56309-82af-4734-a3d4-6c203fd5b23e","Type":"ContainerDied","Data":"5271caf04fd67d53db774793b10659d6750e94bf921bf9bb568acb4d9f4590bc"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.281916 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wx6d7" event={"ID":"3cc67a97-ea67-4814-a822-9a81d093db45","Type":"ContainerStarted","Data":"8fa957f45c7767d2c85334c147fc0f3f36c5625c6e496cb02b98297774a96b01"} Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.286990 4940 scope.go:117] "RemoveContainer" containerID="74f237af1603b7679312e5f0ffe714ea2bf8e1fd4fcf073910b8ab40cc83aaff" Nov 26 07:18:01 crc kubenswrapper[4940]: E1126 07:18:01.346659 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69f67fa7_ea74_4966_b69c_ab547896057e.slice/crio-96daa4d06959246193e4e52966cb1077a7a8319911905a3cea480a0d594ab40e\": RecentStats: unable to find data in memory cache]" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.349012 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.364757 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi31fa-account-delete-fjlwz"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.374725 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fk2tn\" (UniqueName: \"kubernetes.io/projected/77513168-a1ea-4794-a859-b942b0e9c262-kube-api-access-fk2tn\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.374797 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-internal-tls-certs\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.374821 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-public-tls-certs\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.374974 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-config-data\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.375060 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-combined-ca-bundle\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.375097 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-fernet-keys\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.375150 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-scripts\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.375216 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-credential-keys\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.379192 4940 scope.go:117] "RemoveContainer" containerID="af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.379694 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapi31fa-account-delete-fjlwz"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.379978 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-scripts" (OuterVolumeSpecName: "scripts") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.381705 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-7648b55b6f-h7txx" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.151:9696/\": dial tcp 10.217.0.151:9696: connect: connection refused" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.382317 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_cda3e3c5-7a68-4269-8c15-b463b9263805/ovn-northd/0.log" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.382418 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.385097 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.385128 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77513168-a1ea-4794-a859-b942b0e9c262-kube-api-access-fk2tn" (OuterVolumeSpecName: "kube-api-access-fk2tn") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "kube-api-access-fk2tn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.389926 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.393599 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.398256 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.404527 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.404760 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-config-data" (OuterVolumeSpecName: "config-data") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.410634 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.430854 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.452563 4940 scope.go:117] "RemoveContainer" containerID="3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1" Nov 26 07:18:01 crc kubenswrapper[4940]: E1126 07:18:01.453267 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1\": container with ID starting with 3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1 not found: ID does not exist" containerID="3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.453503 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1"} err="failed to get container status \"3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1\": rpc error: code = NotFound desc = could not find container \"3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1\": container with ID starting with 3dbecdbe3caeaa6e120dd7919e3cf40e91dbeed38ebe11ff82b671aabb0078f1 not found: ID does not exist" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.453683 4940 scope.go:117] "RemoveContainer" containerID="31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076" Nov 26 07:18:01 crc kubenswrapper[4940]: E1126 07:18:01.468533 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076\": container with ID starting with 31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076 not found: ID does not exist" containerID="31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.468579 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076"} err="failed to get container status \"31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076\": rpc error: code = NotFound desc = could not find container \"31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076\": container with ID starting with 31c6a08214abc23934a5493c4b624de94b8fc55aeb9919f8c6800ab2cc4dd076 not found: ID does not exist" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.468606 4940 scope.go:117] "RemoveContainer" containerID="1396c22b993b0aca90db40c1aeddefa5385ff3803bd17b220761d0266e9bcfa2" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.471550 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.476692 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-combined-ca-bundle\") pod \"cda3e3c5-7a68-4269-8c15-b463b9263805\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.476893 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-erlang-cookie\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: 
\"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.477092 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmtc9\" (UniqueName: \"kubernetes.io/projected/cda3e3c5-7a68-4269-8c15-b463b9263805-kube-api-access-vmtc9\") pod \"cda3e3c5-7a68-4269-8c15-b463b9263805\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.477237 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-plugins-conf\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.477373 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-scripts\") pod \"cda3e3c5-7a68-4269-8c15-b463b9263805\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.476772 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.477314 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.477789 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-plugins\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.477888 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-server-conf\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.477990 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-config\") pod \"cda3e3c5-7a68-4269-8c15-b463b9263805\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.478086 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-scripts" (OuterVolumeSpecName: "scripts") pod "cda3e3c5-7a68-4269-8c15-b463b9263805" (UID: "cda3e3c5-7a68-4269-8c15-b463b9263805"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.478281 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qths2\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-kube-api-access-qths2\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.478378 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-rundir\") pod \"cda3e3c5-7a68-4269-8c15-b463b9263805\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.478466 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-northd-tls-certs\") pod \"cda3e3c5-7a68-4269-8c15-b463b9263805\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.478604 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-internal-tls-certs\") pod \"77513168-a1ea-4794-a859-b942b0e9c262\" (UID: \"77513168-a1ea-4794-a859-b942b0e9c262\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.478777 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.478900 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-metrics-certs-tls-certs\") pod \"cda3e3c5-7a68-4269-8c15-b463b9263805\" (UID: \"cda3e3c5-7a68-4269-8c15-b463b9263805\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.479018 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-tls\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.479121 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-confd\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.479219 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.479294 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/69972749-03ff-48e9-b031-99c33ce86e96-pod-info\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: 
\"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.479398 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/69972749-03ff-48e9-b031-99c33ce86e96-erlang-cookie-secret\") pod \"69972749-03ff-48e9-b031-99c33ce86e96\" (UID: \"69972749-03ff-48e9-b031-99c33ce86e96\") " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480014 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480132 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480211 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480326 4940 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480400 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fk2tn\" (UniqueName: \"kubernetes.io/projected/77513168-a1ea-4794-a859-b942b0e9c262-kube-api-access-fk2tn\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480477 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480610 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480716 4940 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.479031 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.479555 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.480684 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cda3e3c5-7a68-4269-8c15-b463b9263805-kube-api-access-vmtc9" (OuterVolumeSpecName: "kube-api-access-vmtc9") pod "cda3e3c5-7a68-4269-8c15-b463b9263805" (UID: "cda3e3c5-7a68-4269-8c15-b463b9263805"). InnerVolumeSpecName "kube-api-access-vmtc9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.483714 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69972749-03ff-48e9-b031-99c33ce86e96-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: W1126 07:18:01.486588 4940 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/77513168-a1ea-4794-a859-b942b0e9c262/volumes/kubernetes.io~secret/internal-tls-certs Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.487995 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.486957 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-config" (OuterVolumeSpecName: "config") pod "cda3e3c5-7a68-4269-8c15-b463b9263805" (UID: "cda3e3c5-7a68-4269-8c15-b463b9263805"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.487127 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/69972749-03ff-48e9-b031-99c33ce86e96-pod-info" (OuterVolumeSpecName: "pod-info") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.487485 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "cda3e3c5-7a68-4269-8c15-b463b9263805" (UID: "cda3e3c5-7a68-4269-8c15-b463b9263805"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.489739 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.497369 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.497735 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.497891 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-kube-api-access-qths2" (OuterVolumeSpecName: "kube-api-access-qths2") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "kube-api-access-qths2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.508866 4940 scope.go:117] "RemoveContainer" containerID="9a54934efee8d36f8fe7bb895361ca1c8eea974267d635a96b512ed26e1494b7" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.509083 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cda3e3c5-7a68-4269-8c15-b463b9263805" (UID: "cda3e3c5-7a68-4269-8c15-b463b9263805"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.515009 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data" (OuterVolumeSpecName: "config-data") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.538797 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "77513168-a1ea-4794-a859-b942b0e9c262" (UID: "77513168-a1ea-4794-a859-b942b0e9c262"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.539815 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-server-conf" (OuterVolumeSpecName: "server-conf") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.541646 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.552916 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.568697 4940 scope.go:117] "RemoveContainer" containerID="54bb0d6665161c60a8df29f40f934d0511950d32bca2d3f2401e1f8db929d7db" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.572831 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "cda3e3c5-7a68-4269-8c15-b463b9263805" (UID: "cda3e3c5-7a68-4269-8c15-b463b9263805"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.578522 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "cda3e3c5-7a68-4269-8c15-b463b9263805" (UID: "cda3e3c5-7a68-4269-8c15-b463b9263805"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.579675 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "69972749-03ff-48e9-b031-99c33ce86e96" (UID: "69972749-03ff-48e9-b031-99c33ce86e96"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.582597 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cda3e3c5-7a68-4269-8c15-b463b9263805-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.582700 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qths2\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-kube-api-access-qths2\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.582783 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.582866 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cda3e3c5-7a68-4269-8c15-b463b9263805-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.582935 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583005 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/77513168-a1ea-4794-a859-b942b0e9c262-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583154 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583246 4940 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583315 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583369 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583432 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583516 4940 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/69972749-03ff-48e9-b031-99c33ce86e96-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583570 4940 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/69972749-03ff-48e9-b031-99c33ce86e96-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc 
kubenswrapper[4940]: I1126 07:18:01.583625 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cda3e3c5-7a68-4269-8c15-b463b9263805-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583692 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmtc9\" (UniqueName: \"kubernetes.io/projected/cda3e3c5-7a68-4269-8c15-b463b9263805-kube-api-access-vmtc9\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583766 4940 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583836 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/69972749-03ff-48e9-b031-99c33ce86e96-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.583906 4940 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/69972749-03ff-48e9-b031-99c33ce86e96-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.597008 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.603588 4940 scope.go:117] "RemoveContainer" containerID="5fcae63004fc694c125bd0895a86667cfe1b52745e785eba586d24f5dff5b67d" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.629253 4940 scope.go:117] "RemoveContainer" containerID="aa12214a40e17b396195f7c3910988635f60c08b39041e13aa87c7d6cc3c84be" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.646943 4940 scope.go:117] "RemoveContainer" containerID="af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731" Nov 26 07:18:01 crc kubenswrapper[4940]: E1126 07:18:01.647369 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731\": container with ID starting with af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731 not found: ID does not exist" containerID="af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.647408 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731"} err="failed to get container status \"af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731\": rpc error: code = NotFound desc = could not find container \"af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731\": container with ID starting with af9d860a13eff7fcdbc5f6355c5b5f50262c7b4a42370c65ba188175630ae731 not found: ID does not exist" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.647439 4940 scope.go:117] "RemoveContainer" containerID="be9a79f6e9b952d84bd13f052112cb7a980f7370fa0b6edda5ce2ca0596c774c" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.666233 4940 scope.go:117] "RemoveContainer" containerID="f62286900f59e04e8b5935f72d22351db72e1e90c13dfd3c197b2183c1ddd5c0" Nov 26 07:18:01 crc 
kubenswrapper[4940]: I1126 07:18:01.685111 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.688385 4940 scope.go:117] "RemoveContainer" containerID="91bd7e89a1d7d0eb89a61cc1bbe9824e672e433b9e8a1ae66449fbaea8335372" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.711828 4940 scope.go:117] "RemoveContainer" containerID="ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.732766 4940 scope.go:117] "RemoveContainer" containerID="88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.775838 4940 scope.go:117] "RemoveContainer" containerID="ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445" Nov 26 07:18:01 crc kubenswrapper[4940]: E1126 07:18:01.776735 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445\": container with ID starting with ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445 not found: ID does not exist" containerID="ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.776782 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445"} err="failed to get container status \"ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445\": rpc error: code = NotFound desc = could not find container \"ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445\": container with ID starting with ad7a669a19a0235cfbe5f714409992ed42f795adc1f6f5d58990ec2c7974e445 not found: ID does not exist" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.776819 4940 scope.go:117] "RemoveContainer" containerID="88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493" Nov 26 07:18:01 crc kubenswrapper[4940]: E1126 07:18:01.777581 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493\": container with ID starting with 88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493 not found: ID does not exist" containerID="88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.777642 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493"} err="failed to get container status \"88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493\": rpc error: code = NotFound desc = could not find container \"88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493\": container with ID starting with 88758dff17d63329530811aa194b7ccddafb8e52420a60a734de06ea20a5b493 not found: ID does not exist" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.777683 4940 scope.go:117] "RemoveContainer" containerID="d13a5ed30c21b3f298bcf01eebabe76fcf8233b02bfcb43d1775bcd1e984e502" Nov 26 07:18:01 crc kubenswrapper[4940]: E1126 07:18:01.793371 4940 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< 
Nov 26 07:18:01 crc kubenswrapper[4940]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-26T07:17:54Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 26 07:18:01 crc kubenswrapper[4940]: /etc/init.d/functions: line 589: 386 Alarm clock "$@" Nov 26 07:18:01 crc kubenswrapper[4940]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-78r7g" message=< Nov 26 07:18:01 crc kubenswrapper[4940]: Exiting ovn-controller (1) [FAILED] Nov 26 07:18:01 crc kubenswrapper[4940]: Killing ovn-controller (1) [ OK ] Nov 26 07:18:01 crc kubenswrapper[4940]: Killing ovn-controller (1) with SIGKILL [ OK ] Nov 26 07:18:01 crc kubenswrapper[4940]: 2025-11-26T07:17:54Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 26 07:18:01 crc kubenswrapper[4940]: /etc/init.d/functions: line 589: 386 Alarm clock "$@" Nov 26 07:18:01 crc kubenswrapper[4940]: > Nov 26 07:18:01 crc kubenswrapper[4940]: E1126 07:18:01.793415 4940 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 26 07:18:01 crc kubenswrapper[4940]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-26T07:17:54Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 26 07:18:01 crc kubenswrapper[4940]: /etc/init.d/functions: line 589: 386 Alarm clock "$@" Nov 26 07:18:01 crc kubenswrapper[4940]: > pod="openstack/ovn-controller-78r7g" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" containerName="ovn-controller" containerID="cri-o://e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.793485 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-78r7g" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" containerName="ovn-controller" containerID="cri-o://e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3" gracePeriod=22 Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.807586 4940 scope.go:117] "RemoveContainer" containerID="e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47" Nov 26 07:18:01 crc kubenswrapper[4940]: I1126 07:18:01.896377 4940 scope.go:117] "RemoveContainer" containerID="3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.054191 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-7qg26"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.057146 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-7qg26"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.062810 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican2dca-account-delete-4kp2b"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.072188 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-2dca-account-create-update-z9bbp"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.072959 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-2dca-account-create-update-z9bbp"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.077542 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican2dca-account-delete-4kp2b"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.079600 4940 scope.go:117] "RemoveContainer" 
containerID="e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47" Nov 26 07:18:02 crc kubenswrapper[4940]: E1126 07:18:02.080123 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47\": container with ID starting with e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47 not found: ID does not exist" containerID="e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.080161 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47"} err="failed to get container status \"e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47\": rpc error: code = NotFound desc = could not find container \"e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47\": container with ID starting with e865bd635d1469bd09e1e51b7755a333de82abb56a0f8148c2fc8431702d5f47 not found: ID does not exist" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.080210 4940 scope.go:117] "RemoveContainer" containerID="3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc" Nov 26 07:18:02 crc kubenswrapper[4940]: E1126 07:18:02.080864 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc\": container with ID starting with 3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc not found: ID does not exist" containerID="3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.080904 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc"} err="failed to get container status \"3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc\": rpc error: code = NotFound desc = could not find container \"3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc\": container with ID starting with 3b9ab4e2513d0627cbc519359df8191c6c66a80f8edc7fea51c40f940c22aecc not found: ID does not exist" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.209508 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-78r7g_7ad552be-e28a-4873-a90e-867bd6efc437/ovn-controller/0.log" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.209580 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-78r7g" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.257565 4940 generic.go:334] "Generic (PLEG): container finished" podID="f93321ef-2519-4bc0-b3d1-a45194267ca6" containerID="e146e6ba7d8a0d8f7e78795f75427c31b1e5c239738564750f702ee38a6b5f61" exitCode=0 Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.257639 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f93321ef-2519-4bc0-b3d1-a45194267ca6","Type":"ContainerDied","Data":"e146e6ba7d8a0d8f7e78795f75427c31b1e5c239738564750f702ee38a6b5f61"} Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.263422 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-78r7g_7ad552be-e28a-4873-a90e-867bd6efc437/ovn-controller/0.log" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.263478 4940 generic.go:334] "Generic (PLEG): container finished" podID="7ad552be-e28a-4873-a90e-867bd6efc437" containerID="e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3" exitCode=137 Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.263551 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g" event={"ID":"7ad552be-e28a-4873-a90e-867bd6efc437","Type":"ContainerDied","Data":"e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3"} Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.263553 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-78r7g" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.263593 4940 scope.go:117] "RemoveContainer" containerID="e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.263580 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-78r7g" event={"ID":"7ad552be-e28a-4873-a90e-867bd6efc437","Type":"ContainerDied","Data":"8c6d4034a3f964bd13dd7c743ac51daf0afa6b9515a58240b18416a90549f7ea"} Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.267472 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_cda3e3c5-7a68-4269-8c15-b463b9263805/ovn-northd/0.log" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.267536 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cda3e3c5-7a68-4269-8c15-b463b9263805","Type":"ContainerDied","Data":"b2080648fccfaaedf3f9719008cb2addb4595068338c612c22c45e803dcb1324"} Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.267595 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.277552 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"69972749-03ff-48e9-b031-99c33ce86e96","Type":"ContainerDied","Data":"09a2c45b9089f05df04ff2ec41301d3fa43172c173cb00961cf16e39ce91da21"} Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.277562 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.281436 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6f4459f4df-b92xj" event={"ID":"77513168-a1ea-4794-a859-b942b0e9c262","Type":"ContainerDied","Data":"2b4d396f8d23b9de4d00bef84e943afc398b45a0c8d76c59da504b4ad6b7a480"} Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.281503 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6f4459f4df-b92xj" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.294587 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-combined-ca-bundle\") pod \"7ad552be-e28a-4873-a90e-867bd6efc437\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.294850 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-log-ovn\") pod \"7ad552be-e28a-4873-a90e-867bd6efc437\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.294932 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run\") pod \"7ad552be-e28a-4873-a90e-867bd6efc437\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.294978 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run" (OuterVolumeSpecName: "var-run") pod "7ad552be-e28a-4873-a90e-867bd6efc437" (UID: "7ad552be-e28a-4873-a90e-867bd6efc437"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.294952 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "7ad552be-e28a-4873-a90e-867bd6efc437" (UID: "7ad552be-e28a-4873-a90e-867bd6efc437"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.295013 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ad552be-e28a-4873-a90e-867bd6efc437-scripts\") pod \"7ad552be-e28a-4873-a90e-867bd6efc437\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.295099 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtx8b\" (UniqueName: \"kubernetes.io/projected/7ad552be-e28a-4873-a90e-867bd6efc437-kube-api-access-dtx8b\") pod \"7ad552be-e28a-4873-a90e-867bd6efc437\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.295177 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-ovn-controller-tls-certs\") pod \"7ad552be-e28a-4873-a90e-867bd6efc437\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.295210 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run-ovn\") pod \"7ad552be-e28a-4873-a90e-867bd6efc437\" (UID: \"7ad552be-e28a-4873-a90e-867bd6efc437\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.296487 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ad552be-e28a-4873-a90e-867bd6efc437-scripts" (OuterVolumeSpecName: "scripts") pod "7ad552be-e28a-4873-a90e-867bd6efc437" (UID: "7ad552be-e28a-4873-a90e-867bd6efc437"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.297831 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "7ad552be-e28a-4873-a90e-867bd6efc437" (UID: "7ad552be-e28a-4873-a90e-867bd6efc437"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.296212 4940 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.302407 4940 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.318014 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.326083 4940 generic.go:334] "Generic (PLEG): container finished" podID="3cc67a97-ea67-4814-a822-9a81d093db45" containerID="e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6" exitCode=0 Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.326122 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wx6d7" event={"ID":"3cc67a97-ea67-4814-a822-9a81d093db45","Type":"ContainerDied","Data":"e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6"} Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.328581 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.335606 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ad552be-e28a-4873-a90e-867bd6efc437-kube-api-access-dtx8b" (OuterVolumeSpecName: "kube-api-access-dtx8b") pod "7ad552be-e28a-4873-a90e-867bd6efc437" (UID: "7ad552be-e28a-4873-a90e-867bd6efc437"). InnerVolumeSpecName "kube-api-access-dtx8b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.335644 4940 scope.go:117] "RemoveContainer" containerID="e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.335669 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:18:02 crc kubenswrapper[4940]: E1126 07:18:02.339603 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3\": container with ID starting with e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3 not found: ID does not exist" containerID="e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.339642 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3"} err="failed to get container status \"e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3\": rpc error: code = NotFound desc = could not find container \"e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3\": container with ID starting with e2fa9594932fa83ee56718047aedbf16aaea7b024499581e0577092d1f98cdf3 not found: ID does not exist" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.339669 4940 scope.go:117] "RemoveContainer" containerID="b0bf7e3ce23d8aabef777f75a95394afce2f910c3c79d9220de648b9329b628f" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.364869 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.372120 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ad552be-e28a-4873-a90e-867bd6efc437" (UID: "7ad552be-e28a-4873-a90e-867bd6efc437"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.380295 4940 scope.go:117] "RemoveContainer" containerID="12ff99aeba7537f0147a09e4278df695cbd24bddf2045032633b6666a9e54425" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.407133 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.410015 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ad552be-e28a-4873-a90e-867bd6efc437-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.410073 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtx8b\" (UniqueName: \"kubernetes.io/projected/7ad552be-e28a-4873-a90e-867bd6efc437-kube-api-access-dtx8b\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.410087 4940 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7ad552be-e28a-4873-a90e-867bd6efc437-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.410106 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.412265 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "7ad552be-e28a-4873-a90e-867bd6efc437" (UID: "7ad552be-e28a-4873-a90e-867bd6efc437"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.422403 4940 scope.go:117] "RemoveContainer" containerID="ffc8d224e6ee06035af2a49a3dfbb96ff41fdb30dc4fc3b71983a00df2b005c0" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.439255 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6f4459f4df-b92xj"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.448047 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-6f4459f4df-b92xj"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.450875 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.459077 4940 scope.go:117] "RemoveContainer" containerID="8e9e90c1c11a39ed3cb269a3561acbf16d889c8ed343e1427b9f27c9d40de9c6" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.511728 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad552be-e28a-4873-a90e-867bd6efc437-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.592172 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-78r7g"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.598874 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-78r7g"] Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.612768 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-combined-ca-bundle\") pod \"f93321ef-2519-4bc0-b3d1-a45194267ca6\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.612929 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skbp5\" (UniqueName: \"kubernetes.io/projected/f93321ef-2519-4bc0-b3d1-a45194267ca6-kube-api-access-skbp5\") pod \"f93321ef-2519-4bc0-b3d1-a45194267ca6\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.612962 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-config-data\") pod \"f93321ef-2519-4bc0-b3d1-a45194267ca6\" (UID: \"f93321ef-2519-4bc0-b3d1-a45194267ca6\") " Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.615951 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f93321ef-2519-4bc0-b3d1-a45194267ca6-kube-api-access-skbp5" (OuterVolumeSpecName: "kube-api-access-skbp5") pod "f93321ef-2519-4bc0-b3d1-a45194267ca6" (UID: "f93321ef-2519-4bc0-b3d1-a45194267ca6"). InnerVolumeSpecName "kube-api-access-skbp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.635653 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f93321ef-2519-4bc0-b3d1-a45194267ca6" (UID: "f93321ef-2519-4bc0-b3d1-a45194267ca6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.635693 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-config-data" (OuterVolumeSpecName: "config-data") pod "f93321ef-2519-4bc0-b3d1-a45194267ca6" (UID: "f93321ef-2519-4bc0-b3d1-a45194267ca6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.715257 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.715300 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skbp5\" (UniqueName: \"kubernetes.io/projected/f93321ef-2519-4bc0-b3d1-a45194267ca6-kube-api-access-skbp5\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:02 crc kubenswrapper[4940]: I1126 07:18:02.715454 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93321ef-2519-4bc0-b3d1-a45194267ca6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.177288 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11a17576-9a94-4e2d-8915-9d838de09f0b" path="/var/lib/kubelet/pods/11a17576-9a94-4e2d-8915-9d838de09f0b/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.178755 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59926125-c1e1-4ac6-aa0e-2c4256046612" path="/var/lib/kubelet/pods/59926125-c1e1-4ac6-aa0e-2c4256046612/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.179706 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69972749-03ff-48e9-b031-99c33ce86e96" path="/var/lib/kubelet/pods/69972749-03ff-48e9-b031-99c33ce86e96/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.181345 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69f67fa7-ea74-4966-b69c-ab547896057e" path="/var/lib/kubelet/pods/69f67fa7-ea74-4966-b69c-ab547896057e/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.182007 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" path="/var/lib/kubelet/pods/6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.182601 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77513168-a1ea-4794-a859-b942b0e9c262" path="/var/lib/kubelet/pods/77513168-a1ea-4794-a859-b942b0e9c262/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.183894 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" path="/var/lib/kubelet/pods/7ad552be-e28a-4873-a90e-867bd6efc437/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.184689 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cb1ded9-17a3-40e3-955b-11e63806cd6f" path="/var/lib/kubelet/pods/8cb1ded9-17a3-40e3-955b-11e63806cd6f/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.185328 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9abf09e3-dd14-42f4-8b1d-de23d9f0f218" path="/var/lib/kubelet/pods/9abf09e3-dd14-42f4-8b1d-de23d9f0f218/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.190160 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6c56309-82af-4734-a3d4-6c203fd5b23e" path="/var/lib/kubelet/pods/a6c56309-82af-4734-a3d4-6c203fd5b23e/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.191021 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" 
path="/var/lib/kubelet/pods/cda3e3c5-7a68-4269-8c15-b463b9263805/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.192569 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf5afc28-5f57-4d6f-97de-0bac57eb7a1c" path="/var/lib/kubelet/pods/cf5afc28-5f57-4d6f-97de-0bac57eb7a1c/volumes" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.338622 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f93321ef-2519-4bc0-b3d1-a45194267ca6","Type":"ContainerDied","Data":"317a1977bbab7b14a5ff1bcf543ad9858d69cbfe1d93caa4df99a433df6079af"} Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.338636 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.338870 4940 scope.go:117] "RemoveContainer" containerID="e146e6ba7d8a0d8f7e78795f75427c31b1e5c239738564750f702ee38a6b5f61" Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.358031 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 07:18:03 crc kubenswrapper[4940]: I1126 07:18:03.364056 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 07:18:04 crc kubenswrapper[4940]: I1126 07:18:04.348284 4940 generic.go:334] "Generic (PLEG): container finished" podID="3cc67a97-ea67-4814-a822-9a81d093db45" containerID="978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c" exitCode=0 Nov 26 07:18:04 crc kubenswrapper[4940]: I1126 07:18:04.348329 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wx6d7" event={"ID":"3cc67a97-ea67-4814-a822-9a81d093db45","Type":"ContainerDied","Data":"978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c"} Nov 26 07:18:05 crc kubenswrapper[4940]: I1126 07:18:05.175106 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f93321ef-2519-4bc0-b3d1-a45194267ca6" path="/var/lib/kubelet/pods/f93321ef-2519-4bc0-b3d1-a45194267ca6/volumes" Nov 26 07:18:05 crc kubenswrapper[4940]: I1126 07:18:05.356971 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wx6d7" event={"ID":"3cc67a97-ea67-4814-a822-9a81d093db45","Type":"ContainerStarted","Data":"7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f"} Nov 26 07:18:05 crc kubenswrapper[4940]: I1126 07:18:05.376591 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wx6d7" podStartSLOduration=4.979100577 podStartE2EDuration="7.376574525s" podCreationTimestamp="2025-11-26 07:17:58 +0000 UTC" firstStartedPulling="2025-11-26 07:18:02.335319099 +0000 UTC m=+1383.855460718" lastFinishedPulling="2025-11-26 07:18:04.732793037 +0000 UTC m=+1386.252934666" observedRunningTime="2025-11-26 07:18:05.375874823 +0000 UTC m=+1386.896016442" watchObservedRunningTime="2025-11-26 07:18:05.376574525 +0000 UTC m=+1386.896716144" Nov 26 07:18:05 crc kubenswrapper[4940]: E1126 07:18:05.615004 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" 
cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:05 crc kubenswrapper[4940]: E1126 07:18:05.615439 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:05 crc kubenswrapper[4940]: E1126 07:18:05.615759 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:05 crc kubenswrapper[4940]: E1126 07:18:05.615797 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:05 crc kubenswrapper[4940]: E1126 07:18:05.623323 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:05 crc kubenswrapper[4940]: E1126 07:18:05.624666 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:05 crc kubenswrapper[4940]: E1126 07:18:05.628352 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:05 crc kubenswrapper[4940]: E1126 07:18:05.628410 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:09 crc kubenswrapper[4940]: I1126 07:18:09.794028 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:18:09 crc kubenswrapper[4940]: I1126 07:18:09.795013 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:18:09 crc kubenswrapper[4940]: I1126 07:18:09.842997 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:18:10 crc kubenswrapper[4940]: I1126 07:18:10.455249 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:18:10 crc kubenswrapper[4940]: I1126 07:18:10.511267 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wx6d7"] Nov 26 07:18:10 crc kubenswrapper[4940]: E1126 07:18:10.615069 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:10 crc kubenswrapper[4940]: E1126 07:18:10.615722 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:10 crc kubenswrapper[4940]: E1126 07:18:10.616445 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:10 crc kubenswrapper[4940]: E1126 07:18:10.616583 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:10 crc kubenswrapper[4940]: E1126 07:18:10.616610 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:10 crc kubenswrapper[4940]: E1126 07:18:10.617906 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:10 crc kubenswrapper[4940]: E1126 07:18:10.622863 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:10 crc 
kubenswrapper[4940]: E1126 07:18:10.622933 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.232513 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.276078 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-public-tls-certs\") pod \"e9325bed-7edc-41a3-a53c-fb5d147532f5\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.276277 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-httpd-config\") pod \"e9325bed-7edc-41a3-a53c-fb5d147532f5\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.276364 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-combined-ca-bundle\") pod \"e9325bed-7edc-41a3-a53c-fb5d147532f5\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.276484 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pb85b\" (UniqueName: \"kubernetes.io/projected/e9325bed-7edc-41a3-a53c-fb5d147532f5-kube-api-access-pb85b\") pod \"e9325bed-7edc-41a3-a53c-fb5d147532f5\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.276562 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-ovndb-tls-certs\") pod \"e9325bed-7edc-41a3-a53c-fb5d147532f5\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.276869 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-config\") pod \"e9325bed-7edc-41a3-a53c-fb5d147532f5\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.276920 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-internal-tls-certs\") pod \"e9325bed-7edc-41a3-a53c-fb5d147532f5\" (UID: \"e9325bed-7edc-41a3-a53c-fb5d147532f5\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.283610 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "e9325bed-7edc-41a3-a53c-fb5d147532f5" (UID: "e9325bed-7edc-41a3-a53c-fb5d147532f5"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.284227 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9325bed-7edc-41a3-a53c-fb5d147532f5-kube-api-access-pb85b" (OuterVolumeSpecName: "kube-api-access-pb85b") pod "e9325bed-7edc-41a3-a53c-fb5d147532f5" (UID: "e9325bed-7edc-41a3-a53c-fb5d147532f5"). InnerVolumeSpecName "kube-api-access-pb85b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.315159 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e9325bed-7edc-41a3-a53c-fb5d147532f5" (UID: "e9325bed-7edc-41a3-a53c-fb5d147532f5"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.332855 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e9325bed-7edc-41a3-a53c-fb5d147532f5" (UID: "e9325bed-7edc-41a3-a53c-fb5d147532f5"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.333567 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9325bed-7edc-41a3-a53c-fb5d147532f5" (UID: "e9325bed-7edc-41a3-a53c-fb5d147532f5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.334402 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "e9325bed-7edc-41a3-a53c-fb5d147532f5" (UID: "e9325bed-7edc-41a3-a53c-fb5d147532f5"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.335851 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-config" (OuterVolumeSpecName: "config") pod "e9325bed-7edc-41a3-a53c-fb5d147532f5" (UID: "e9325bed-7edc-41a3-a53c-fb5d147532f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.378000 4940 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.378051 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.378061 4940 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.378070 4940 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.378079 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.378087 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9325bed-7edc-41a3-a53c-fb5d147532f5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.378095 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pb85b\" (UniqueName: \"kubernetes.io/projected/e9325bed-7edc-41a3-a53c-fb5d147532f5-kube-api-access-pb85b\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.420584 4940 generic.go:334] "Generic (PLEG): container finished" podID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerID="8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb" exitCode=0 Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.420646 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7648b55b6f-h7txx" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.420683 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7648b55b6f-h7txx" event={"ID":"e9325bed-7edc-41a3-a53c-fb5d147532f5","Type":"ContainerDied","Data":"8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb"} Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.420759 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7648b55b6f-h7txx" event={"ID":"e9325bed-7edc-41a3-a53c-fb5d147532f5","Type":"ContainerDied","Data":"b5dc4b531264bd8a86c07f33beee4a7907a2b2779a72899d2212212d3ecadb41"} Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.420794 4940 scope.go:117] "RemoveContainer" containerID="5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.421170 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wx6d7" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" containerName="registry-server" containerID="cri-o://7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f" gracePeriod=2 Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.460376 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7648b55b6f-h7txx"] Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.461743 4940 scope.go:117] "RemoveContainer" containerID="8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.467561 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7648b55b6f-h7txx"] Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.491187 4940 scope.go:117] "RemoveContainer" containerID="5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a" Nov 26 07:18:12 crc kubenswrapper[4940]: E1126 07:18:12.491685 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a\": container with ID starting with 5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a not found: ID does not exist" containerID="5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.491720 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a"} err="failed to get container status \"5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a\": rpc error: code = NotFound desc = could not find container \"5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a\": container with ID starting with 5fc84a85007889f6e19c33e918f3068d7cb6d3c1ccfa0943c2fa5d34ad2c085a not found: ID does not exist" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.491743 4940 scope.go:117] "RemoveContainer" containerID="8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb" Nov 26 07:18:12 crc kubenswrapper[4940]: E1126 07:18:12.492050 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb\": container with ID starting with 8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb not found: ID does not exist" 
containerID="8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.492069 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb"} err="failed to get container status \"8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb\": rpc error: code = NotFound desc = could not find container \"8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb\": container with ID starting with 8cf2f71b654cc7f218beca5289d3a1c7e57e6c76760d46e5fc4879c2ce2df2eb not found: ID does not exist" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.878349 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.888837 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-utilities\") pod \"3cc67a97-ea67-4814-a822-9a81d093db45\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.888980 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-catalog-content\") pod \"3cc67a97-ea67-4814-a822-9a81d093db45\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.889117 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfnpd\" (UniqueName: \"kubernetes.io/projected/3cc67a97-ea67-4814-a822-9a81d093db45-kube-api-access-gfnpd\") pod \"3cc67a97-ea67-4814-a822-9a81d093db45\" (UID: \"3cc67a97-ea67-4814-a822-9a81d093db45\") " Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.889854 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-utilities" (OuterVolumeSpecName: "utilities") pod "3cc67a97-ea67-4814-a822-9a81d093db45" (UID: "3cc67a97-ea67-4814-a822-9a81d093db45"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.891882 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cc67a97-ea67-4814-a822-9a81d093db45-kube-api-access-gfnpd" (OuterVolumeSpecName: "kube-api-access-gfnpd") pod "3cc67a97-ea67-4814-a822-9a81d093db45" (UID: "3cc67a97-ea67-4814-a822-9a81d093db45"). InnerVolumeSpecName "kube-api-access-gfnpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.961921 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3cc67a97-ea67-4814-a822-9a81d093db45" (UID: "3cc67a97-ea67-4814-a822-9a81d093db45"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.991506 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfnpd\" (UniqueName: \"kubernetes.io/projected/3cc67a97-ea67-4814-a822-9a81d093db45-kube-api-access-gfnpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.991564 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:12 crc kubenswrapper[4940]: I1126 07:18:12.991582 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cc67a97-ea67-4814-a822-9a81d093db45-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.178194 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" path="/var/lib/kubelet/pods/e9325bed-7edc-41a3-a53c-fb5d147532f5/volumes" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.440156 4940 generic.go:334] "Generic (PLEG): container finished" podID="3cc67a97-ea67-4814-a822-9a81d093db45" containerID="7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f" exitCode=0 Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.440231 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wx6d7" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.441142 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wx6d7" event={"ID":"3cc67a97-ea67-4814-a822-9a81d093db45","Type":"ContainerDied","Data":"7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f"} Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.441220 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wx6d7" event={"ID":"3cc67a97-ea67-4814-a822-9a81d093db45","Type":"ContainerDied","Data":"8fa957f45c7767d2c85334c147fc0f3f36c5625c6e496cb02b98297774a96b01"} Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.441245 4940 scope.go:117] "RemoveContainer" containerID="7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.473602 4940 scope.go:117] "RemoveContainer" containerID="978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.474575 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wx6d7"] Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.483139 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wx6d7"] Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.497481 4940 scope.go:117] "RemoveContainer" containerID="e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.519309 4940 scope.go:117] "RemoveContainer" containerID="7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f" Nov 26 07:18:13 crc kubenswrapper[4940]: E1126 07:18:13.519726 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f\": container with ID 
starting with 7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f not found: ID does not exist" containerID="7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.519773 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f"} err="failed to get container status \"7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f\": rpc error: code = NotFound desc = could not find container \"7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f\": container with ID starting with 7b47683279dd0820f620fe8dae229ca1de0f532fa10ec1942f9d75115a8b2c0f not found: ID does not exist" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.519804 4940 scope.go:117] "RemoveContainer" containerID="978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c" Nov 26 07:18:13 crc kubenswrapper[4940]: E1126 07:18:13.520545 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c\": container with ID starting with 978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c not found: ID does not exist" containerID="978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.520574 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c"} err="failed to get container status \"978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c\": rpc error: code = NotFound desc = could not find container \"978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c\": container with ID starting with 978ecf2030a694f549bea964414414a49e1460b9c3aa9ea3b06c693c8e907b0c not found: ID does not exist" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.520593 4940 scope.go:117] "RemoveContainer" containerID="e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6" Nov 26 07:18:13 crc kubenswrapper[4940]: E1126 07:18:13.520889 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6\": container with ID starting with e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6 not found: ID does not exist" containerID="e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6" Nov 26 07:18:13 crc kubenswrapper[4940]: I1126 07:18:13.520925 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6"} err="failed to get container status \"e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6\": rpc error: code = NotFound desc = could not find container \"e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6\": container with ID starting with e8e90ff64e61b46b8e738cfe31873774ecd5336e02845e0ed770a1a965e8c8c6 not found: ID does not exist" Nov 26 07:18:15 crc kubenswrapper[4940]: I1126 07:18:15.200673 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" path="/var/lib/kubelet/pods/3cc67a97-ea67-4814-a822-9a81d093db45/volumes" Nov 26 07:18:15 crc kubenswrapper[4940]: E1126 
07:18:15.615359 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:15 crc kubenswrapper[4940]: E1126 07:18:15.615893 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:15 crc kubenswrapper[4940]: E1126 07:18:15.616241 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:15 crc kubenswrapper[4940]: E1126 07:18:15.616275 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:15 crc kubenswrapper[4940]: E1126 07:18:15.616575 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:15 crc kubenswrapper[4940]: E1126 07:18:15.618623 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:15 crc kubenswrapper[4940]: E1126 07:18:15.619992 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:15 crc kubenswrapper[4940]: E1126 07:18:15.620030 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:20 crc kubenswrapper[4940]: E1126 07:18:20.615292 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code 
= NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:20 crc kubenswrapper[4940]: E1126 07:18:20.616899 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:20 crc kubenswrapper[4940]: E1126 07:18:20.617661 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:20 crc kubenswrapper[4940]: E1126 07:18:20.618168 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:20 crc kubenswrapper[4940]: E1126 07:18:20.618224 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:20 crc kubenswrapper[4940]: E1126 07:18:20.618896 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:20 crc kubenswrapper[4940]: E1126 07:18:20.620634 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:20 crc kubenswrapper[4940]: E1126 07:18:20.620686 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:21 crc kubenswrapper[4940]: I1126 07:18:21.728308 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:18:21 crc kubenswrapper[4940]: I1126 07:18:21.728400 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:18:21 crc kubenswrapper[4940]: I1126 07:18:21.728477 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:18:21 crc kubenswrapper[4940]: I1126 07:18:21.729523 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8474433f4fb6790caac5b18927cabd8ff680b641694a71892a6b78f7af1c5c17"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:18:21 crc kubenswrapper[4940]: I1126 07:18:21.729635 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://8474433f4fb6790caac5b18927cabd8ff680b641694a71892a6b78f7af1c5c17" gracePeriod=600 Nov 26 07:18:22 crc kubenswrapper[4940]: I1126 07:18:22.525902 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="8474433f4fb6790caac5b18927cabd8ff680b641694a71892a6b78f7af1c5c17" exitCode=0 Nov 26 07:18:22 crc kubenswrapper[4940]: I1126 07:18:22.525975 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"8474433f4fb6790caac5b18927cabd8ff680b641694a71892a6b78f7af1c5c17"} Nov 26 07:18:22 crc kubenswrapper[4940]: I1126 07:18:22.526413 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c"} Nov 26 07:18:22 crc kubenswrapper[4940]: I1126 07:18:22.526462 4940 scope.go:117] "RemoveContainer" containerID="0393c811a7e2a741f1c1d69f74428b6878add21bd4ed185abcf620da55c2d4a4" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.315852 4940 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316248 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" containerName="registry-server" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316264 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" containerName="registry-server" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316274 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="ovn-northd" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316282 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="ovn-northd" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316299 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" containerName="ovn-controller" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316306 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" containerName="ovn-controller" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316318 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" containerName="extract-content" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316324 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" containerName="extract-content" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316335 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-api" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316342 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-api" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316355 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da80f9db-6be1-459c-9d61-ca1fc206d472" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316362 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="da80f9db-6be1-459c-9d61-ca1fc206d472" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316371 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerName="mysql-bootstrap" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316379 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerName="mysql-bootstrap" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316390 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e67a9769-a54d-4b7a-ac04-dcbe4bc5662c" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316401 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e67a9769-a54d-4b7a-ac04-dcbe4bc5662c" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316416 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" containerName="extract-utilities" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316424 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" containerName="extract-utilities" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316437 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f93321ef-2519-4bc0-b3d1-a45194267ca6" containerName="nova-cell0-conductor-conductor" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316443 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f93321ef-2519-4bc0-b3d1-a45194267ca6" containerName="nova-cell0-conductor-conductor" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316455 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb19b71d-413f-46df-a509-7dc7aff75598" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: 
I1126 07:18:23.316463 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb19b71d-413f-46df-a509-7dc7aff75598" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316476 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-httpd" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316483 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-httpd" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316503 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69972749-03ff-48e9-b031-99c33ce86e96" containerName="rabbitmq" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316544 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69972749-03ff-48e9-b031-99c33ce86e96" containerName="rabbitmq" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316556 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerName="rabbitmq" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316563 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerName="rabbitmq" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316578 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerName="setup-container" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316586 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerName="setup-container" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316602 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" containerName="nova-scheduler-scheduler" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316610 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" containerName="nova-scheduler-scheduler" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316621 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerName="galera" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316628 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerName="galera" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316643 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69972749-03ff-48e9-b031-99c33ce86e96" containerName="setup-container" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316650 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69972749-03ff-48e9-b031-99c33ce86e96" containerName="setup-container" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316661 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69f67fa7-ea74-4966-b69c-ab547896057e" containerName="memcached" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316669 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69f67fa7-ea74-4966-b69c-ab547896057e" containerName="memcached" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316677 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="openstack-network-exporter" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 
07:18:23.316684 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="openstack-network-exporter" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316693 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59926125-c1e1-4ac6-aa0e-2c4256046612" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316701 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="59926125-c1e1-4ac6-aa0e-2c4256046612" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316717 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77513168-a1ea-4794-a859-b942b0e9c262" containerName="keystone-api" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316725 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="77513168-a1ea-4794-a859-b942b0e9c262" containerName="keystone-api" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316738 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9abf09e3-dd14-42f4-8b1d-de23d9f0f218" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316745 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9abf09e3-dd14-42f4-8b1d-de23d9f0f218" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316758 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="160032d4-a9c0-4b2c-be8b-f4a5c188c451" containerName="nova-cell1-conductor-conductor" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316765 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="160032d4-a9c0-4b2c-be8b-f4a5c188c451" containerName="nova-cell1-conductor-conductor" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.316778 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a6c9a20-06bc-43f8-aad9-fb5d72231110" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316785 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a6c9a20-06bc-43f8-aad9-fb5d72231110" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316956 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="openstack-network-exporter" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316969 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="11a17576-9a94-4e2d-8915-9d838de09f0b" containerName="rabbitmq" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316977 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-api" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.316992 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69972749-03ff-48e9-b031-99c33ce86e96" containerName="rabbitmq" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317003 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9325bed-7edc-41a3-a53c-fb5d147532f5" containerName="neutron-httpd" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317016 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f93321ef-2519-4bc0-b3d1-a45194267ca6" containerName="nova-cell0-conductor-conductor" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317024 4940 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="69f67fa7-ea74-4966-b69c-ab547896057e" containerName="memcached" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317052 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a6c9a20-06bc-43f8-aad9-fb5d72231110" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317064 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb19b71d-413f-46df-a509-7dc7aff75598" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317078 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda3e3c5-7a68-4269-8c15-b463b9263805" containerName="ovn-northd" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317092 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6c56309-82af-4734-a3d4-6c203fd5b23e" containerName="galera" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317108 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e67a9769-a54d-4b7a-ac04-dcbe4bc5662c" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317119 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="da80f9db-6be1-459c-9d61-ca1fc206d472" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317127 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cc67a97-ea67-4814-a822-9a81d093db45" containerName="registry-server" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317138 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ad552be-e28a-4873-a90e-867bd6efc437" containerName="ovn-controller" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317148 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="59926125-c1e1-4ac6-aa0e-2c4256046612" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317162 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="77513168-a1ea-4794-a859-b942b0e9c262" containerName="keystone-api" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317172 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="160032d4-a9c0-4b2c-be8b-f4a5c188c451" containerName="nova-cell1-conductor-conductor" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317186 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9abf09e3-dd14-42f4-8b1d-de23d9f0f218" containerName="mariadb-account-delete" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317198 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a6bb8be-4f0f-4e55-ad79-ad0e3a35168d" containerName="nova-scheduler-scheduler" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.317854 4940 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.318015 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.318075 4940 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.318143 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f" gracePeriod=15 Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.318184 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40" gracePeriod=15 Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.318280 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30" gracePeriod=15 Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.318292 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea" gracePeriod=15 Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.318277 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2" gracePeriod=15 Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.320197 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320216 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.320233 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320240 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.320252 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320259 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.320276 4940 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320282 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.320299 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320306 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.320324 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320331 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.320344 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320351 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320563 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320577 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320586 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320592 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320614 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.320632 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.324540 4940 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.331600 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.331641 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.333336 4940 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.363592 4940 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.58:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.405437 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.405482 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.405635 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.405708 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.405767 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.405808 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.405880 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.405932 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.507550 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.507860 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.507623 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.507927 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.507939 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.507884 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.507979 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508013 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508029 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: 
\"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508088 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508098 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508116 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508123 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508142 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508173 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.508176 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.535392 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.536576 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.537285 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30" exitCode=0 Nov 26 
07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.537312 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40" exitCode=0 Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.537320 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2" exitCode=0 Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.537327 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea" exitCode=2 Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.537384 4940 scope.go:117] "RemoveContainer" containerID="f40db66ddac3401a7e6e91103b0816bd02743508093d3d91e4d9ea6357a43048" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.541390 4940 generic.go:334] "Generic (PLEG): container finished" podID="66d37527-e535-45de-9f92-0f95d9f7a856" containerID="e925b4a56b72465b8a8a205a019f66039a756526e43166d81d1d6a0c679b4910" exitCode=0 Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.541420 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"66d37527-e535-45de-9f92-0f95d9f7a856","Type":"ContainerDied","Data":"e925b4a56b72465b8a8a205a019f66039a756526e43166d81d1d6a0c679b4910"} Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.541960 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:23 crc kubenswrapper[4940]: I1126 07:18:23.664835 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:23 crc kubenswrapper[4940]: W1126 07:18:23.692389 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-09511ad90a3d0892908ca24364dcdd03acc60f186685688f2da5252770433df5 WatchSource:0}: Error finding container 09511ad90a3d0892908ca24364dcdd03acc60f186685688f2da5252770433df5: Status 404 returned error can't find the container with id 09511ad90a3d0892908ca24364dcdd03acc60f186685688f2da5252770433df5 Nov 26 07:18:23 crc kubenswrapper[4940]: E1126 07:18:23.695932 4940 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.58:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b7d59cd48d8b3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 07:18:23.695345843 +0000 UTC m=+1405.215487472,LastTimestamp:2025-11-26 07:18:23.695345843 +0000 UTC m=+1405.215487472,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.553534 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.563605 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerID="5968ea4146aabb9243cd7bc0fcedda38425122a8df83965feee0250ed0d15f33" exitCode=137 Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.563689 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"5968ea4146aabb9243cd7bc0fcedda38425122a8df83965feee0250ed0d15f33"} Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.565973 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"c1ab9a87d5d2ccfa26b711c62908b242ca9a913f805f38c90d64487038dfd7b5"} Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.566008 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"09511ad90a3d0892908ca24364dcdd03acc60f186685688f2da5252770433df5"} Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.566724 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:24 crc kubenswrapper[4940]: E1126 07:18:24.566804 4940 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.58:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.568243 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-k9l7t_5f26eaaa-63b0-491d-b664-56edff3be80c/ovs-vswitchd/0.log" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.569094 4940 generic.go:334] "Generic (PLEG): container finished" podID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" exitCode=137 Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.569168 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k9l7t" event={"ID":"5f26eaaa-63b0-491d-b664-56edff3be80c","Type":"ContainerDied","Data":"b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf"} Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.836183 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.837940 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.927631 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-var-lock\") pod \"66d37527-e535-45de-9f92-0f95d9f7a856\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.927749 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-kubelet-dir\") pod \"66d37527-e535-45de-9f92-0f95d9f7a856\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.927796 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66d37527-e535-45de-9f92-0f95d9f7a856-kube-api-access\") pod \"66d37527-e535-45de-9f92-0f95d9f7a856\" (UID: \"66d37527-e535-45de-9f92-0f95d9f7a856\") " Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.927743 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-var-lock" (OuterVolumeSpecName: "var-lock") pod "66d37527-e535-45de-9f92-0f95d9f7a856" (UID: "66d37527-e535-45de-9f92-0f95d9f7a856"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.927769 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "66d37527-e535-45de-9f92-0f95d9f7a856" (UID: "66d37527-e535-45de-9f92-0f95d9f7a856"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.928073 4940 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.928091 4940 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66d37527-e535-45de-9f92-0f95d9f7a856-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:24 crc kubenswrapper[4940]: I1126 07:18:24.933980 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66d37527-e535-45de-9f92-0f95d9f7a856-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "66d37527-e535-45de-9f92-0f95d9f7a856" (UID: "66d37527-e535-45de-9f92-0f95d9f7a856"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.029371 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66d37527-e535-45de-9f92-0f95d9f7a856-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.579537 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"66d37527-e535-45de-9f92-0f95d9f7a856","Type":"ContainerDied","Data":"38da2448a8122076a841e0a9844d569c10cba14a4f39b69134394946a43fd1e1"} Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.579850 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38da2448a8122076a841e0a9844d569c10cba14a4f39b69134394946a43fd1e1" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.579610 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.584620 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:25 crc kubenswrapper[4940]: E1126 07:18:25.615432 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:25 crc kubenswrapper[4940]: E1126 07:18:25.615475 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:25 crc kubenswrapper[4940]: E1126 07:18:25.615891 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:25 crc kubenswrapper[4940]: E1126 07:18:25.616096 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:25 crc kubenswrapper[4940]: E1126 07:18:25.616256 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:25 crc kubenswrapper[4940]: E1126 07:18:25.616387 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:25 crc kubenswrapper[4940]: E1126 07:18:25.616508 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf 
is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:25 crc kubenswrapper[4940]: E1126 07:18:25.616544 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.877232 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.878443 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.879169 4940 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.879687 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.941500 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.941601 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.941682 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.941726 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.941788 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.941827 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.942212 4940 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.942242 4940 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:25 crc kubenswrapper[4940]: I1126 07:18:25.942259 4940 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.590470 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.591114 4940 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f" exitCode=0 Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.591155 4940 scope.go:117] "RemoveContainer" containerID="1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.591353 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.607436 4940 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.607877 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.633552 4940 scope.go:117] "RemoveContainer" containerID="7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.658280 4940 scope.go:117] "RemoveContainer" containerID="a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.678824 4940 scope.go:117] "RemoveContainer" containerID="2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.702785 4940 scope.go:117] "RemoveContainer" containerID="420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.726148 4940 scope.go:117] "RemoveContainer" containerID="6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.749460 4940 scope.go:117] "RemoveContainer" containerID="1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30" Nov 26 07:18:26 crc kubenswrapper[4940]: E1126 07:18:26.749956 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\": container with ID starting with 1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30 not found: ID does not exist" containerID="1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.749991 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30"} err="failed to get container status \"1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\": rpc error: code = NotFound desc = could not find container \"1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30\": container with ID starting with 1f3ac016f9a0696263fe1081491af51b4448bc3e9c15e830f24a7b503c7dda30 not found: ID does not exist" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.750016 4940 scope.go:117] "RemoveContainer" containerID="7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40" Nov 26 07:18:26 crc kubenswrapper[4940]: E1126 07:18:26.750348 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\": container with ID starting with 7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40 not found: ID does not exist" 
containerID="7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.750371 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40"} err="failed to get container status \"7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\": rpc error: code = NotFound desc = could not find container \"7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40\": container with ID starting with 7f2e8d985b2574d931a32fae87b48f614f47a2530edf712591514bfd974d9b40 not found: ID does not exist" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.750390 4940 scope.go:117] "RemoveContainer" containerID="a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2" Nov 26 07:18:26 crc kubenswrapper[4940]: E1126 07:18:26.751586 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\": container with ID starting with a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2 not found: ID does not exist" containerID="a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.751624 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2"} err="failed to get container status \"a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\": rpc error: code = NotFound desc = could not find container \"a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2\": container with ID starting with a649d85de30bf34bca579d475530d7b6bbd0638a94b4bf1de9493a35b90d6cf2 not found: ID does not exist" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.751650 4940 scope.go:117] "RemoveContainer" containerID="2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea" Nov 26 07:18:26 crc kubenswrapper[4940]: E1126 07:18:26.752282 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\": container with ID starting with 2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea not found: ID does not exist" containerID="2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.752314 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea"} err="failed to get container status \"2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\": rpc error: code = NotFound desc = could not find container \"2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea\": container with ID starting with 2776f54d89c1451fa0c20cd4a7d9fd41808ad1ec9ba0daa44bdb830b83780eea not found: ID does not exist" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.752335 4940 scope.go:117] "RemoveContainer" containerID="420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f" Nov 26 07:18:26 crc kubenswrapper[4940]: E1126 07:18:26.752606 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\": container with ID starting with 420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f not found: ID does not exist" containerID="420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.752626 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f"} err="failed to get container status \"420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\": rpc error: code = NotFound desc = could not find container \"420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f\": container with ID starting with 420c628042aadb62c0146e1b7643e4a26e9dc663f69487e2ddb370310cd5118f not found: ID does not exist" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.752645 4940 scope.go:117] "RemoveContainer" containerID="6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf" Nov 26 07:18:26 crc kubenswrapper[4940]: E1126 07:18:26.753004 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\": container with ID starting with 6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf not found: ID does not exist" containerID="6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf" Nov 26 07:18:26 crc kubenswrapper[4940]: I1126 07:18:26.753025 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf"} err="failed to get container status \"6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\": rpc error: code = NotFound desc = could not find container \"6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf\": container with ID starting with 6655f4e23569e6e4e59e1137cc766dcc03f05bb4a204d434fa81bedbeab480bf not found: ID does not exist" Nov 26 07:18:27 crc kubenswrapper[4940]: I1126 07:18:27.173936 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 26 07:18:27 crc kubenswrapper[4940]: I1126 07:18:27.356509 4940 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod01a8836d-ba47-44ef-995e-f5bf2227dcd4"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod01a8836d-ba47-44ef-995e-f5bf2227dcd4] : Timed out while waiting for systemd to remove kubepods-besteffort-pod01a8836d_ba47_44ef_995e_f5bf2227dcd4.slice" Nov 26 07:18:29 crc kubenswrapper[4940]: I1126 07:18:29.171324 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:30 crc kubenswrapper[4940]: E1126 07:18:30.614920 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" 
containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:30 crc kubenswrapper[4940]: E1126 07:18:30.614927 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:30 crc kubenswrapper[4940]: E1126 07:18:30.616230 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:30 crc kubenswrapper[4940]: E1126 07:18:30.616313 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:30 crc kubenswrapper[4940]: E1126 07:18:30.616582 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:30 crc kubenswrapper[4940]: E1126 07:18:30.616633 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:30 crc kubenswrapper[4940]: E1126 07:18:30.616699 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:30 crc kubenswrapper[4940]: E1126 07:18:30.616748 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:32 crc kubenswrapper[4940]: E1126 07:18:32.606582 4940 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:32 crc kubenswrapper[4940]: E1126 07:18:32.607221 4940 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:32 crc kubenswrapper[4940]: E1126 07:18:32.607504 4940 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:32 crc kubenswrapper[4940]: E1126 07:18:32.607736 4940 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:32 crc kubenswrapper[4940]: E1126 07:18:32.607991 4940 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:32 crc kubenswrapper[4940]: I1126 07:18:32.608014 4940 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 26 07:18:32 crc kubenswrapper[4940]: E1126 07:18:32.608234 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="200ms" Nov 26 07:18:32 crc kubenswrapper[4940]: E1126 07:18:32.809393 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="400ms" Nov 26 07:18:33 crc kubenswrapper[4940]: E1126 07:18:33.144437 4940 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.58:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b7d59cd48d8b3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 07:18:23.695345843 +0000 UTC m=+1405.215487472,LastTimestamp:2025-11-26 07:18:23.695345843 +0000 UTC m=+1405.215487472,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 07:18:33 crc kubenswrapper[4940]: E1126 07:18:33.210928 4940 controller.go:145] "Failed to ensure lease exists, will retry" 
err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="800ms" Nov 26 07:18:34 crc kubenswrapper[4940]: E1126 07:18:34.012255 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="1.6s" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.670729 4940 generic.go:334] "Generic (PLEG): container finished" podID="15d11cf9-51e8-4f1e-880e-86d9bba60224" containerID="c0db42120d7511268a3abd51edbe7223da64ad665cf756192a15c7f7f439fd35" exitCode=1 Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.670805 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" event={"ID":"15d11cf9-51e8-4f1e-880e-86d9bba60224","Type":"ContainerDied","Data":"c0db42120d7511268a3abd51edbe7223da64ad665cf756192a15c7f7f439fd35"} Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.671476 4940 scope.go:117] "RemoveContainer" containerID="c0db42120d7511268a3abd51edbe7223da64ad665cf756192a15c7f7f439fd35" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.671655 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.671845 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.672800 4940 generic.go:334] "Generic (PLEG): container finished" podID="4415b953-7e66-4d84-acde-32474c6d0ebf" containerID="48e7311a33f43c5297b58b8f4ae26329f15419cde8fa47af386f41bebc35df2a" exitCode=1 Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.672861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" event={"ID":"4415b953-7e66-4d84-acde-32474c6d0ebf","Type":"ContainerDied","Data":"48e7311a33f43c5297b58b8f4ae26329f15419cde8fa47af386f41bebc35df2a"} Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.673854 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.673932 4940 scope.go:117] "RemoveContainer" containerID="48e7311a33f43c5297b58b8f4ae26329f15419cde8fa47af386f41bebc35df2a" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.675062 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.675348 4940 generic.go:334] "Generic (PLEG): container finished" podID="0eca9bcc-5909-48e2-927e-b059359977d5" containerID="c66ae8354053587003a3020533f39b00b608d4e75aac5bea19e35aa9963916db" exitCode=1 Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.675374 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" event={"ID":"0eca9bcc-5909-48e2-927e-b059359977d5","Type":"ContainerDied","Data":"c66ae8354053587003a3020533f39b00b608d4e75aac5bea19e35aa9963916db"} Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.675433 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.675618 4940 scope.go:117] "RemoveContainer" containerID="c66ae8354053587003a3020533f39b00b608d4e75aac5bea19e35aa9963916db" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.675905 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.677173 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.677725 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:34 crc kubenswrapper[4940]: I1126 07:18:34.678021 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.377063 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.382166 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 
07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.614158 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="3.2s" Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.614610 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.614743 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.615204 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.615309 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.615543 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.615596 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.615999 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" 
cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.616066 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.686430 4940 generic.go:334] "Generic (PLEG): container finished" podID="15d11cf9-51e8-4f1e-880e-86d9bba60224" containerID="d9250948d068ba5c71b0e012131ca443c11f8c17f9a4d9af940261d964d2c995" exitCode=1 Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.686488 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" event={"ID":"15d11cf9-51e8-4f1e-880e-86d9bba60224","Type":"ContainerDied","Data":"d9250948d068ba5c71b0e012131ca443c11f8c17f9a4d9af940261d964d2c995"} Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.686518 4940 scope.go:117] "RemoveContainer" containerID="c0db42120d7511268a3abd51edbe7223da64ad665cf756192a15c7f7f439fd35" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.687179 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.687321 4940 scope.go:117] "RemoveContainer" containerID="d9250948d068ba5c71b0e012131ca443c11f8c17f9a4d9af940261d964d2c995" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.687683 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.687841 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-5p8hf_openstack-operators(15d11cf9-51e8-4f1e-880e-86d9bba60224)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.695177 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.695387 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.695969 4940 generic.go:334] "Generic (PLEG): container finished" podID="4415b953-7e66-4d84-acde-32474c6d0ebf" containerID="bba1a12c3448d128ce989f7b0c72c53c7d0de1d5769bc2644482d3a2034f39f3" exitCode=1 Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.696029 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" event={"ID":"4415b953-7e66-4d84-acde-32474c6d0ebf","Type":"ContainerDied","Data":"bba1a12c3448d128ce989f7b0c72c53c7d0de1d5769bc2644482d3a2034f39f3"} Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.696325 4940 scope.go:117] "RemoveContainer" containerID="bba1a12c3448d128ce989f7b0c72c53c7d0de1d5769bc2644482d3a2034f39f3" Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.696490 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-6f8d6cc986-tlmk2_metallb-system(4415b953-7e66-4d84-acde-32474c6d0ebf)\"" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.696834 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.697187 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.697475 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.697746 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.699728 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.699779 4940 generic.go:334] "Generic (PLEG): container finished" 
podID="f614b9022728cf315e60c057852e563e" containerID="9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701" exitCode=1 Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.699853 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701"} Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.700676 4940 scope.go:117] "RemoveContainer" containerID="9b74ae820ee87f92cdbe3031deb0dab49dfb13a9894a77877fd04590ae270701" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.701521 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.701899 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.702535 4940 generic.go:334] "Generic (PLEG): container finished" podID="0eca9bcc-5909-48e2-927e-b059359977d5" containerID="5fa7a110185f37a452a68e838b09d7165ff022379be0f23c29a7a9ad052d6c62" exitCode=1 Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.702564 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" event={"ID":"0eca9bcc-5909-48e2-927e-b059359977d5","Type":"ContainerDied","Data":"5fa7a110185f37a452a68e838b09d7165ff022379be0f23c29a7a9ad052d6c62"} Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.702834 4940 scope.go:117] "RemoveContainer" containerID="5fa7a110185f37a452a68e838b09d7165ff022379be0f23c29a7a9ad052d6c62" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.702937 4940 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.703734 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.703938 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" 
Nov 26 07:18:35 crc kubenswrapper[4940]: E1126 07:18:35.703007 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-ht6zr_openstack-operators(0eca9bcc-5909-48e2-927e-b059359977d5)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.704284 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.704569 4940 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.705080 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.705586 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.705896 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.754513 4940 scope.go:117] "RemoveContainer" containerID="48e7311a33f43c5297b58b8f4ae26329f15419cde8fa47af386f41bebc35df2a" Nov 26 07:18:35 crc kubenswrapper[4940]: I1126 07:18:35.826906 4940 scope.go:117] "RemoveContainer" containerID="c66ae8354053587003a3020533f39b00b608d4e75aac5bea19e35aa9963916db" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.718830 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.718921 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5171ff367b2173499d8f0fbaa40775913211706c0b8a5f582f1f10d59c04704a"} Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.719939 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.720188 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.720452 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.720668 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.721267 4940 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.722473 4940 scope.go:117] "RemoveContainer" containerID="5fa7a110185f37a452a68e838b09d7165ff022379be0f23c29a7a9ad052d6c62" Nov 26 07:18:36 crc kubenswrapper[4940]: E1126 07:18:36.722736 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-ht6zr_openstack-operators(0eca9bcc-5909-48e2-927e-b059359977d5)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.723135 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.724110 4940 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.724334 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.724878 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.725674 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.726850 4940 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.727499 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.727857 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.729143 4940 scope.go:117] "RemoveContainer" containerID="d9250948d068ba5c71b0e012131ca443c11f8c17f9a4d9af940261d964d2c995" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.729462 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: I1126 07:18:36.729977 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" 
pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:36 crc kubenswrapper[4940]: E1126 07:18:36.729987 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-5p8hf_openstack-operators(15d11cf9-51e8-4f1e-880e-86d9bba60224)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.164927 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.166333 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.168417 4940 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.169302 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.169858 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.170476 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.191433 4940 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.191479 4940 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:38 crc kubenswrapper[4940]: E1126 
07:18:38.192120 4940 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.192817 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:38 crc kubenswrapper[4940]: W1126 07:18:38.239845 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-0afc4ed504f311e774b445177edf2ec11849784b4a9fd062d75e99f741f262ec WatchSource:0}: Error finding container 0afc4ed504f311e774b445177edf2ec11849784b4a9fd062d75e99f741f262ec: Status 404 returned error can't find the container with id 0afc4ed504f311e774b445177edf2ec11849784b4a9fd062d75e99f741f262ec Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.747064 4940 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="20d2c6ae57be7ffa261724d829b32375e6869d9b506614e580acb5193e76a22a" exitCode=0 Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.747201 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"20d2c6ae57be7ffa261724d829b32375e6869d9b506614e580acb5193e76a22a"} Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.747411 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0afc4ed504f311e774b445177edf2ec11849784b4a9fd062d75e99f741f262ec"} Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.747666 4940 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.747680 4940 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:38 crc kubenswrapper[4940]: E1126 07:18:38.748620 4940 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.748662 4940 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.749480 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc 
kubenswrapper[4940]: I1126 07:18:38.749920 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.750684 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: I1126 07:18:38.751454 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:38 crc kubenswrapper[4940]: E1126 07:18:38.824928 4940 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="6.4s" Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.183118 4940 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.183561 4940 status_manager.go:851] "Failed to get status for pod" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f8d6cc986-tlmk2\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.183823 4940 status_manager.go:851] "Failed to get status for pod" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-ht6zr\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.184127 4940 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.184341 4940 status_manager.go:851] "Failed to get status for pod" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.184718 4940 status_manager.go:851] "Failed to get status for pod" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-5p8hf\": dial tcp 38.102.83.58:6443: connect: connection refused" Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.766187 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2024fe0167993c0abee8902ff208cd76a5548f7c693bcc529ad40e29e161c982"} Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.766504 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b6bfee43fbd48f61899e55bc33bcfb41e599af83fb973399b37eaa4a3d065446"} Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.766515 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c01021d1e5332aad1c8dcc450a9afe0407ff1fa1857bd27095c88fdfd08e42f3"} Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.961608 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:18:39 crc kubenswrapper[4940]: I1126 07:18:39.962429 4940 scope.go:117] "RemoveContainer" containerID="bba1a12c3448d128ce989f7b0c72c53c7d0de1d5769bc2644482d3a2034f39f3" Nov 26 07:18:39 crc kubenswrapper[4940]: E1126 07:18:39.962622 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-6f8d6cc986-tlmk2_metallb-system(4415b953-7e66-4d84-acde-32474c6d0ebf)\"" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" Nov 26 07:18:40 crc kubenswrapper[4940]: E1126 07:18:40.615071 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:40 crc kubenswrapper[4940]: E1126 07:18:40.615099 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:40 crc kubenswrapper[4940]: E1126 07:18:40.615703 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or 
running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:40 crc kubenswrapper[4940]: E1126 07:18:40.615730 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:40 crc kubenswrapper[4940]: E1126 07:18:40.616077 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:40 crc kubenswrapper[4940]: E1126 07:18:40.616147 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:40 crc kubenswrapper[4940]: E1126 07:18:40.616325 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:40 crc kubenswrapper[4940]: E1126 07:18:40.616363 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:40 crc kubenswrapper[4940]: I1126 07:18:40.777740 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"51f32a82aea26411dea96b23678ab71dd3f182e0cf2280c15f57b1976acb58ef"} Nov 26 07:18:40 crc kubenswrapper[4940]: I1126 07:18:40.777788 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"908de51d2a0582ca2e51f7069d131f31044514347757890c4c6ca8acae856878"} Nov 26 07:18:40 crc kubenswrapper[4940]: I1126 07:18:40.777901 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:40 crc kubenswrapper[4940]: I1126 07:18:40.778001 4940 kubelet.go:1909] "Trying to delete pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:40 crc kubenswrapper[4940]: I1126 07:18:40.778023 4940 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:43 crc kubenswrapper[4940]: I1126 07:18:43.043232 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 07:18:43 crc kubenswrapper[4940]: I1126 07:18:43.192896 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:43 crc kubenswrapper[4940]: I1126 07:18:43.193061 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:43 crc kubenswrapper[4940]: I1126 07:18:43.198645 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.834329 4940 generic.go:334] "Generic (PLEG): container finished" podID="e8c9bb46-a618-437a-914b-6cb9c1ede58c" containerID="25890392692f29e5a659d515eb3bd1d708efd5f84abcee39e0bd480c92fb5a26" exitCode=1 Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.834421 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" event={"ID":"e8c9bb46-a618-437a-914b-6cb9c1ede58c","Type":"ContainerDied","Data":"25890392692f29e5a659d515eb3bd1d708efd5f84abcee39e0bd480c92fb5a26"} Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.835097 4940 scope.go:117] "RemoveContainer" containerID="25890392692f29e5a659d515eb3bd1d708efd5f84abcee39e0bd480c92fb5a26" Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.836386 4940 generic.go:334] "Generic (PLEG): container finished" podID="869e95c8-91e8-4b5a-8eda-35c045ee8cbe" containerID="f9bb41181d2edd1504186b07395945c6d729e7b15f2b5b788d649bea02cff690" exitCode=1 Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.836477 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" event={"ID":"869e95c8-91e8-4b5a-8eda-35c045ee8cbe","Type":"ContainerDied","Data":"f9bb41181d2edd1504186b07395945c6d729e7b15f2b5b788d649bea02cff690"} Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.836943 4940 scope.go:117] "RemoveContainer" containerID="f9bb41181d2edd1504186b07395945c6d729e7b15f2b5b788d649bea02cff690" Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.838703 4940 generic.go:334] "Generic (PLEG): container finished" podID="53456206-67c0-4503-b72f-909a3ec07b2a" containerID="5bfefbe026ea6a73cb5c1cb3b4ff2a611c50c9d151f51528495ed8b45b0e52e4" exitCode=1 Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.838731 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" event={"ID":"53456206-67c0-4503-b72f-909a3ec07b2a","Type":"ContainerDied","Data":"5bfefbe026ea6a73cb5c1cb3b4ff2a611c50c9d151f51528495ed8b45b0e52e4"} Nov 26 07:18:44 crc kubenswrapper[4940]: I1126 07:18:44.839112 4940 scope.go:117] "RemoveContainer" containerID="5bfefbe026ea6a73cb5c1cb3b4ff2a611c50c9d151f51528495ed8b45b0e52e4" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.280194 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.283576 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.376961 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.377081 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.377773 4940 scope.go:117] "RemoveContainer" containerID="5fa7a110185f37a452a68e838b09d7165ff022379be0f23c29a7a9ad052d6c62" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.383007 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.383071 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.383587 4940 scope.go:117] "RemoveContainer" containerID="d9250948d068ba5c71b0e012131ca443c11f8c17f9a4d9af940261d964d2c995" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.541021 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.541241 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.615516 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.615549 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.615783 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.615861 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.615991 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.616139 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.616142 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.616294 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-k9l7t" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.788520 4940 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.825292 4940 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="a139d93c-1e31-4f9a-9aff-136926e53860" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.855371 4940 generic.go:334] "Generic (PLEG): container finished" podID="0eca9bcc-5909-48e2-927e-b059359977d5" containerID="eb2c0e24126f4a93298401a8ddd378e90fedcb87cd4e47890243297f16ed4f9a" exitCode=1 Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.855494 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" event={"ID":"0eca9bcc-5909-48e2-927e-b059359977d5","Type":"ContainerDied","Data":"eb2c0e24126f4a93298401a8ddd378e90fedcb87cd4e47890243297f16ed4f9a"} Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.855532 4940 scope.go:117] "RemoveContainer" containerID="5fa7a110185f37a452a68e838b09d7165ff022379be0f23c29a7a9ad052d6c62" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.856155 4940 scope.go:117] "RemoveContainer" containerID="eb2c0e24126f4a93298401a8ddd378e90fedcb87cd4e47890243297f16ed4f9a" Nov 26 07:18:45 crc 
kubenswrapper[4940]: E1126 07:18:45.856509 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-ht6zr_openstack-operators(0eca9bcc-5909-48e2-927e-b059359977d5)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.859571 4940 generic.go:334] "Generic (PLEG): container finished" podID="df395369-43ff-4cd2-af6e-60a9a96a4d66" containerID="e888cf5b6033f5ecf003b8fbe88a0b40d5ac748a81f955d38898d4bbeffd08a0" exitCode=1 Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.859658 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" event={"ID":"df395369-43ff-4cd2-af6e-60a9a96a4d66","Type":"ContainerDied","Data":"e888cf5b6033f5ecf003b8fbe88a0b40d5ac748a81f955d38898d4bbeffd08a0"} Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.860178 4940 scope.go:117] "RemoveContainer" containerID="e888cf5b6033f5ecf003b8fbe88a0b40d5ac748a81f955d38898d4bbeffd08a0" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.868083 4940 generic.go:334] "Generic (PLEG): container finished" podID="e46c7b1d-e02f-4807-a650-1038eba64162" containerID="113b8f0f73fd0bfdb692f5aca68c205f381d14458d5ac47d9949ce94e3318770" exitCode=1 Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.868169 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" event={"ID":"e46c7b1d-e02f-4807-a650-1038eba64162","Type":"ContainerDied","Data":"113b8f0f73fd0bfdb692f5aca68c205f381d14458d5ac47d9949ce94e3318770"} Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.870454 4940 scope.go:117] "RemoveContainer" containerID="113b8f0f73fd0bfdb692f5aca68c205f381d14458d5ac47d9949ce94e3318770" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.870790 4940 generic.go:334] "Generic (PLEG): container finished" podID="15d11cf9-51e8-4f1e-880e-86d9bba60224" containerID="c197f7b26e412ffceddb13315c7001ac431ad74018789b7199e6b9622a752663" exitCode=1 Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.870897 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" event={"ID":"15d11cf9-51e8-4f1e-880e-86d9bba60224","Type":"ContainerDied","Data":"c197f7b26e412ffceddb13315c7001ac431ad74018789b7199e6b9622a752663"} Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.871731 4940 scope.go:117] "RemoveContainer" containerID="c197f7b26e412ffceddb13315c7001ac431ad74018789b7199e6b9622a752663" Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.872160 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-5p8hf_openstack-operators(15d11cf9-51e8-4f1e-880e-86d9bba60224)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.873493 4940 generic.go:334] "Generic (PLEG): container finished" podID="e8c9bb46-a618-437a-914b-6cb9c1ede58c" containerID="ac48e187fbac899a570e74f5231f7277c945f7367cd10441dd2d9c1dc5e9497f" exitCode=1 Nov 
26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.873555 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" event={"ID":"e8c9bb46-a618-437a-914b-6cb9c1ede58c","Type":"ContainerDied","Data":"ac48e187fbac899a570e74f5231f7277c945f7367cd10441dd2d9c1dc5e9497f"} Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.874064 4940 scope.go:117] "RemoveContainer" containerID="ac48e187fbac899a570e74f5231f7277c945f7367cd10441dd2d9c1dc5e9497f" Nov 26 07:18:45 crc kubenswrapper[4940]: E1126 07:18:45.874374 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-zrxd8_openstack-operators(e8c9bb46-a618-437a-914b-6cb9c1ede58c)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zrxd8" podUID="e8c9bb46-a618-437a-914b-6cb9c1ede58c" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.877418 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" event={"ID":"869e95c8-91e8-4b5a-8eda-35c045ee8cbe","Type":"ContainerStarted","Data":"5c559ea2d32461a1979e04910393677008308c9968a760eeb207cf5a07bc45eb"} Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.878097 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.894832 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" event={"ID":"53456206-67c0-4503-b72f-909a3ec07b2a","Type":"ContainerStarted","Data":"2a3996f50782396555d4de61070747174868c1641d0f9e10e17c37814ffb9056"} Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.894867 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.895386 4940 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.895651 4940 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.904192 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.910261 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:18:45 crc kubenswrapper[4940]: I1126 07:18:45.940538 4940 scope.go:117] "RemoveContainer" containerID="d9250948d068ba5c71b0e012131ca443c11f8c17f9a4d9af940261d964d2c995" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.008957 4940 scope.go:117] "RemoveContainer" containerID="25890392692f29e5a659d515eb3bd1d708efd5f84abcee39e0bd480c92fb5a26" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.103196 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.276394 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") pod \"1ae63b19-f186-430b-87f0-d058d2efa83c\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.276459 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-cache\") pod \"1ae63b19-f186-430b-87f0-d058d2efa83c\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.276521 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"1ae63b19-f186-430b-87f0-d058d2efa83c\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.276587 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-lock\") pod \"1ae63b19-f186-430b-87f0-d058d2efa83c\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.276653 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ttzj\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-kube-api-access-4ttzj\") pod \"1ae63b19-f186-430b-87f0-d058d2efa83c\" (UID: \"1ae63b19-f186-430b-87f0-d058d2efa83c\") " Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.278030 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-cache" (OuterVolumeSpecName: "cache") pod "1ae63b19-f186-430b-87f0-d058d2efa83c" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.278759 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-lock" (OuterVolumeSpecName: "lock") pod "1ae63b19-f186-430b-87f0-d058d2efa83c" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.282599 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "1ae63b19-f186-430b-87f0-d058d2efa83c" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.282652 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-kube-api-access-4ttzj" (OuterVolumeSpecName: "kube-api-access-4ttzj") pod "1ae63b19-f186-430b-87f0-d058d2efa83c" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c"). InnerVolumeSpecName "kube-api-access-4ttzj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.287931 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "swift") pod "1ae63b19-f186-430b-87f0-d058d2efa83c" (UID: "1ae63b19-f186-430b-87f0-d058d2efa83c"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.378261 4940 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-lock\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.378287 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ttzj\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-kube-api-access-4ttzj\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.378299 4940 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ae63b19-f186-430b-87f0-d058d2efa83c-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.378307 4940 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1ae63b19-f186-430b-87f0-d058d2efa83c-cache\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.378337 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.399302 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.479753 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.905696 4940 generic.go:334] "Generic (PLEG): container finished" podID="86296495-65bc-46a3-a775-621a7bf1745f" containerID="f3b11de0cba5fabe5e4f8b771ef0ba514601283f5a893290ce592c51a22c14c2" exitCode=1 Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.905772 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" event={"ID":"86296495-65bc-46a3-a775-621a7bf1745f","Type":"ContainerDied","Data":"f3b11de0cba5fabe5e4f8b771ef0ba514601283f5a893290ce592c51a22c14c2"} Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.906380 4940 scope.go:117] "RemoveContainer" containerID="f3b11de0cba5fabe5e4f8b771ef0ba514601283f5a893290ce592c51a22c14c2" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.924892 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1ae63b19-f186-430b-87f0-d058d2efa83c","Type":"ContainerDied","Data":"4a2662f8ac9a95a04c533b91be7382a4f479563951aae471c80a7d6a5240e657"} Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.924949 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.924963 4940 scope.go:117] "RemoveContainer" containerID="5968ea4146aabb9243cd7bc0fcedda38425122a8df83965feee0250ed0d15f33" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.934567 4940 generic.go:334] "Generic (PLEG): container finished" podID="7756325b-5cc4-4eb6-ae14-5f71924c3413" containerID="66e7036b9edbb973ac2dfbc3f9f84d9976a9419baa0b52a8f311c421d91c0fd1" exitCode=1 Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.934895 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" event={"ID":"7756325b-5cc4-4eb6-ae14-5f71924c3413","Type":"ContainerDied","Data":"66e7036b9edbb973ac2dfbc3f9f84d9976a9419baa0b52a8f311c421d91c0fd1"} Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.935423 4940 scope.go:117] "RemoveContainer" containerID="66e7036b9edbb973ac2dfbc3f9f84d9976a9419baa0b52a8f311c421d91c0fd1" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.940143 4940 generic.go:334] "Generic (PLEG): container finished" podID="327827f9-8ca3-4d2e-8478-ace9eb784b21" containerID="a18a90bdf99dc5707aa80d198343866fac1bb9413286ee9c066621e591d80395" exitCode=1 Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.940219 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" event={"ID":"327827f9-8ca3-4d2e-8478-ace9eb784b21","Type":"ContainerDied","Data":"a18a90bdf99dc5707aa80d198343866fac1bb9413286ee9c066621e591d80395"} Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.940610 4940 scope.go:117] "RemoveContainer" containerID="a18a90bdf99dc5707aa80d198343866fac1bb9413286ee9c066621e591d80395" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.947485 4940 generic.go:334] "Generic (PLEG): container finished" podID="e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4" containerID="770f0e888ed9d736e24b973a0db645ac5c63d4728e3246d8f6213419e6924c37" exitCode=1 Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.947568 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" event={"ID":"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4","Type":"ContainerDied","Data":"770f0e888ed9d736e24b973a0db645ac5c63d4728e3246d8f6213419e6924c37"} Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.951273 4940 scope.go:117] "RemoveContainer" containerID="770f0e888ed9d736e24b973a0db645ac5c63d4728e3246d8f6213419e6924c37" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.955865 4940 generic.go:334] "Generic (PLEG): container finished" podID="67bde2c7-9e64-469e-b400-071b32f065da" containerID="9db78c16f5b35cf049201d82712db21f533c70a534a80df60cc4689a577b7131" exitCode=1 Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.956259 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" event={"ID":"67bde2c7-9e64-469e-b400-071b32f065da","Type":"ContainerDied","Data":"9db78c16f5b35cf049201d82712db21f533c70a534a80df60cc4689a577b7131"} Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.956918 4940 scope.go:117] "RemoveContainer" containerID="9db78c16f5b35cf049201d82712db21f533c70a534a80df60cc4689a577b7131" Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.986392 4940 generic.go:334] "Generic (PLEG): container finished" podID="df395369-43ff-4cd2-af6e-60a9a96a4d66" 
containerID="7fd50f7016eb2306511a570e0fd391c32c2de106f6fd5c7f0f303eba054bc644" exitCode=1 Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.986490 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" event={"ID":"df395369-43ff-4cd2-af6e-60a9a96a4d66","Type":"ContainerDied","Data":"7fd50f7016eb2306511a570e0fd391c32c2de106f6fd5c7f0f303eba054bc644"} Nov 26 07:18:46 crc kubenswrapper[4940]: I1126 07:18:46.987120 4940 scope.go:117] "RemoveContainer" containerID="7fd50f7016eb2306511a570e0fd391c32c2de106f6fd5c7f0f303eba054bc644" Nov 26 07:18:46 crc kubenswrapper[4940]: E1126 07:18:46.987341 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-7dkrp_openstack-operators(df395369-43ff-4cd2-af6e-60a9a96a4d66)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" podUID="df395369-43ff-4cd2-af6e-60a9a96a4d66" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.002626 4940 generic.go:334] "Generic (PLEG): container finished" podID="9527c833-8bce-440b-b4e5-ca0a08ef7d28" containerID="f6a59ff24ca24b60c8df863f4660f2261fca6f6898f69caf7a39cd40ea7c334b" exitCode=1 Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.002740 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" event={"ID":"9527c833-8bce-440b-b4e5-ca0a08ef7d28","Type":"ContainerDied","Data":"f6a59ff24ca24b60c8df863f4660f2261fca6f6898f69caf7a39cd40ea7c334b"} Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.003232 4940 scope.go:117] "RemoveContainer" containerID="f6a59ff24ca24b60c8df863f4660f2261fca6f6898f69caf7a39cd40ea7c334b" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.009223 4940 generic.go:334] "Generic (PLEG): container finished" podID="b53f82af-849a-47b4-a878-676055ad11ef" containerID="179f7cba17ee6f72630f8e9d0839668d4b1fc29ece555b99ba134421ede5478d" exitCode=1 Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.009303 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" event={"ID":"b53f82af-849a-47b4-a878-676055ad11ef","Type":"ContainerDied","Data":"179f7cba17ee6f72630f8e9d0839668d4b1fc29ece555b99ba134421ede5478d"} Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.009849 4940 scope.go:117] "RemoveContainer" containerID="179f7cba17ee6f72630f8e9d0839668d4b1fc29ece555b99ba134421ede5478d" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.011948 4940 generic.go:334] "Generic (PLEG): container finished" podID="e46c7b1d-e02f-4807-a650-1038eba64162" containerID="866f583b325e790dfade7bcadcc88883562243a5f772f9878e2b3321b4a16d09" exitCode=1 Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.011998 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" event={"ID":"e46c7b1d-e02f-4807-a650-1038eba64162","Type":"ContainerDied","Data":"866f583b325e790dfade7bcadcc88883562243a5f772f9878e2b3321b4a16d09"} Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.012466 4940 scope.go:117] "RemoveContainer" containerID="866f583b325e790dfade7bcadcc88883562243a5f772f9878e2b3321b4a16d09" Nov 26 07:18:47 crc kubenswrapper[4940]: E1126 07:18:47.012660 4940 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-qwlpn_openstack-operators(e46c7b1d-e02f-4807-a650-1038eba64162)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" podUID="e46c7b1d-e02f-4807-a650-1038eba64162" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.018333 4940 generic.go:334] "Generic (PLEG): container finished" podID="2b2e7f46-8ad4-4361-8e95-76aa1e091665" containerID="6be974cfc983b06d8579e128e6bb85eceb785f44210e86cd7c82d9cdeed56b9f" exitCode=1 Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.018488 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" event={"ID":"2b2e7f46-8ad4-4361-8e95-76aa1e091665","Type":"ContainerDied","Data":"6be974cfc983b06d8579e128e6bb85eceb785f44210e86cd7c82d9cdeed56b9f"} Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.019375 4940 scope.go:117] "RemoveContainer" containerID="6be974cfc983b06d8579e128e6bb85eceb785f44210e86cd7c82d9cdeed56b9f" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.028360 4940 generic.go:334] "Generic (PLEG): container finished" podID="7ce6057b-0d67-48fc-9d34-b6574eda6978" containerID="f537c33fd1350f4fc54f21bbf168f336ab2ca52bb8da5666d8584afe96ecb173" exitCode=1 Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.028415 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" event={"ID":"7ce6057b-0d67-48fc-9d34-b6574eda6978","Type":"ContainerDied","Data":"f537c33fd1350f4fc54f21bbf168f336ab2ca52bb8da5666d8584afe96ecb173"} Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.030285 4940 scope.go:117] "RemoveContainer" containerID="f537c33fd1350f4fc54f21bbf168f336ab2ca52bb8da5666d8584afe96ecb173" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.033746 4940 scope.go:117] "RemoveContainer" containerID="18fd11a465765ba259762706e393ce42274d6d5ab6b21c460bed17a0534150bb" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.044431 4940 scope.go:117] "RemoveContainer" containerID="eb2c0e24126f4a93298401a8ddd378e90fedcb87cd4e47890243297f16ed4f9a" Nov 26 07:18:47 crc kubenswrapper[4940]: E1126 07:18:47.044659 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-ht6zr_openstack-operators(0eca9bcc-5909-48e2-927e-b059359977d5)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.046848 4940 generic.go:334] "Generic (PLEG): container finished" podID="51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5" containerID="2442ed10e98338fffcf8e1b946ea1b87654a5e145291979c0df657ceb62cb316" exitCode=1 Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.047297 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" event={"ID":"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5","Type":"ContainerDied","Data":"2442ed10e98338fffcf8e1b946ea1b87654a5e145291979c0df657ceb62cb316"} Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.047524 4940 scope.go:117] "RemoveContainer" 
containerID="2442ed10e98338fffcf8e1b946ea1b87654a5e145291979c0df657ceb62cb316" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.047950 4940 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.047966 4940 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="d760c346-dd1e-45e8-ae78-b53338b7d1eb" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.098346 4940 scope.go:117] "RemoveContainer" containerID="85ac1cbb7cd8cd7a99e39dcf3fc62fbf9041ad24323c288d9baf670d703ac447" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.179468 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-k9l7t_5f26eaaa-63b0-491d-b664-56edff3be80c/ovs-vswitchd/0.log" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.180815 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.208763 4940 scope.go:117] "RemoveContainer" containerID="e58705bee99fbf1a356b726ecc7d48c7a1d44cee6e432d30db17de1a4b1bed0c" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.289281 4940 scope.go:117] "RemoveContainer" containerID="184283b7cf9ea6a22c37b80e59b65273e5c1e54072b94ba5e98ff402061ac3b7" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.294551 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-lib\") pod \"5f26eaaa-63b0-491d-b664-56edff3be80c\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.294606 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzrdv\" (UniqueName: \"kubernetes.io/projected/5f26eaaa-63b0-491d-b664-56edff3be80c-kube-api-access-qzrdv\") pod \"5f26eaaa-63b0-491d-b664-56edff3be80c\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.294631 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f26eaaa-63b0-491d-b664-56edff3be80c-scripts\") pod \"5f26eaaa-63b0-491d-b664-56edff3be80c\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.294674 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-etc-ovs\") pod \"5f26eaaa-63b0-491d-b664-56edff3be80c\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.294703 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-run\") pod \"5f26eaaa-63b0-491d-b664-56edff3be80c\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.294741 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-log\") pod \"5f26eaaa-63b0-491d-b664-56edff3be80c\" (UID: \"5f26eaaa-63b0-491d-b664-56edff3be80c\") " Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 
07:18:47.295618 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-lib" (OuterVolumeSpecName: "var-lib") pod "5f26eaaa-63b0-491d-b664-56edff3be80c" (UID: "5f26eaaa-63b0-491d-b664-56edff3be80c"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.295990 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-run" (OuterVolumeSpecName: "var-run") pod "5f26eaaa-63b0-491d-b664-56edff3be80c" (UID: "5f26eaaa-63b0-491d-b664-56edff3be80c"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.296266 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-log" (OuterVolumeSpecName: "var-log") pod "5f26eaaa-63b0-491d-b664-56edff3be80c" (UID: "5f26eaaa-63b0-491d-b664-56edff3be80c"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.296345 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "5f26eaaa-63b0-491d-b664-56edff3be80c" (UID: "5f26eaaa-63b0-491d-b664-56edff3be80c"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.297379 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f26eaaa-63b0-491d-b664-56edff3be80c-scripts" (OuterVolumeSpecName: "scripts") pod "5f26eaaa-63b0-491d-b664-56edff3be80c" (UID: "5f26eaaa-63b0-491d-b664-56edff3be80c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.302285 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f26eaaa-63b0-491d-b664-56edff3be80c-kube-api-access-qzrdv" (OuterVolumeSpecName: "kube-api-access-qzrdv") pod "5f26eaaa-63b0-491d-b664-56edff3be80c" (UID: "5f26eaaa-63b0-491d-b664-56edff3be80c"). InnerVolumeSpecName "kube-api-access-qzrdv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.328493 4940 scope.go:117] "RemoveContainer" containerID="a370f34c9d0093cf91dd550d53235e78ccb9de14c218c3ae695b5536b1207fa8" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.375201 4940 scope.go:117] "RemoveContainer" containerID="ddacc922294d2d9560d232e885d82b0359325dab5663024167d5a82671b91dfe" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.396026 4940 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.396186 4940 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-lib\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.396201 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzrdv\" (UniqueName: \"kubernetes.io/projected/5f26eaaa-63b0-491d-b664-56edff3be80c-kube-api-access-qzrdv\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.396214 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f26eaaa-63b0-491d-b664-56edff3be80c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.396227 4940 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.396238 4940 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5f26eaaa-63b0-491d-b664-56edff3be80c-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.404211 4940 scope.go:117] "RemoveContainer" containerID="35af38ce835d55824412db931544d40f54c6a971946a5d4b50c5dfa394ce269c" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.445691 4940 scope.go:117] "RemoveContainer" containerID="f7ed6711acf7fdec231f79586d9ef7609a087d63274a505334d0634157294d0a" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.467322 4940 scope.go:117] "RemoveContainer" containerID="5c0f62d433b891f3245b23b16d6de813e3eab74a72c1c1978aa9aadf0b7c327d" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.495518 4940 scope.go:117] "RemoveContainer" containerID="8d0b0bedcd7f34458be64dffa9614d1cddea6dd92857846272fc36941c4d41da" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.518427 4940 scope.go:117] "RemoveContainer" containerID="7d1c1ce5f4f86fefb0522c2b4bb84960ef9691ba82d0c28a857ae9348d2ead68" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.548186 4940 scope.go:117] "RemoveContainer" containerID="7a9b2378bf609eb570091f1edebe633dcf651482009fd78a7715a03ed6c3da04" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.563545 4940 scope.go:117] "RemoveContainer" containerID="1fb92f00872a9aa36d49326d1cf65db8a9032a280f9587ed0d9216aef9800d95" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.576598 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.580155 4940 scope.go:117] "RemoveContainer" 
containerID="5832f00fdc7b07d3b583da1f514fceb0172f7918ac8ced3a03dda26a1c0934ea" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.595177 4940 scope.go:117] "RemoveContainer" containerID="e888cf5b6033f5ecf003b8fbe88a0b40d5ac748a81f955d38898d4bbeffd08a0" Nov 26 07:18:47 crc kubenswrapper[4940]: I1126 07:18:47.623770 4940 scope.go:117] "RemoveContainer" containerID="113b8f0f73fd0bfdb692f5aca68c205f381d14458d5ac47d9949ce94e3318770" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.056905 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-k9l7t_5f26eaaa-63b0-491d-b664-56edff3be80c/ovs-vswitchd/0.log" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.057760 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-k9l7t" event={"ID":"5f26eaaa-63b0-491d-b664-56edff3be80c","Type":"ContainerDied","Data":"7008a7f727d0ac277f063ee8b3bf7aff4869d564f571be3cdbf684b868433a99"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.057802 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-k9l7t" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.057823 4940 scope.go:117] "RemoveContainer" containerID="b1f586260042608027fb0e3f3600c45cec793e6ff12ee8146e1311f13b2c8edf" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.069160 4940 generic.go:334] "Generic (PLEG): container finished" podID="b53f82af-849a-47b4-a878-676055ad11ef" containerID="5bf48035f1dc081adc3b475f5ac9c0f42e7241c3239112cfc3df0c22ab9b3ca2" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.069324 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" event={"ID":"b53f82af-849a-47b4-a878-676055ad11ef","Type":"ContainerDied","Data":"5bf48035f1dc081adc3b475f5ac9c0f42e7241c3239112cfc3df0c22ab9b3ca2"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.070355 4940 scope.go:117] "RemoveContainer" containerID="5bf48035f1dc081adc3b475f5ac9c0f42e7241c3239112cfc3df0c22ab9b3ca2" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.071674 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-97dsx_openstack-operators(b53f82af-849a-47b4-a878-676055ad11ef)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" podUID="b53f82af-849a-47b4-a878-676055ad11ef" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.091019 4940 generic.go:334] "Generic (PLEG): container finished" podID="327827f9-8ca3-4d2e-8478-ace9eb784b21" containerID="60be7d9a7140bc14412456bd45e1620a443c10884f02b8eb806bb6080c6608d2" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.091193 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" event={"ID":"327827f9-8ca3-4d2e-8478-ace9eb784b21","Type":"ContainerDied","Data":"60be7d9a7140bc14412456bd45e1620a443c10884f02b8eb806bb6080c6608d2"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.092272 4940 scope.go:117] "RemoveContainer" containerID="60be7d9a7140bc14412456bd45e1620a443c10884f02b8eb806bb6080c6608d2" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.092699 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-55vwf_openstack-operators(327827f9-8ca3-4d2e-8478-ace9eb784b21)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" podUID="327827f9-8ca3-4d2e-8478-ace9eb784b21" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.097770 4940 scope.go:117] "RemoveContainer" containerID="3e13c0b4cf6a68601f0dc52d347bc0ec7c13ae227f0c730c53a8328517633aee" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.101251 4940 scope.go:117] "RemoveContainer" containerID="7fd50f7016eb2306511a570e0fd391c32c2de106f6fd5c7f0f303eba054bc644" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.101653 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-7dkrp_openstack-operators(df395369-43ff-4cd2-af6e-60a9a96a4d66)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" podUID="df395369-43ff-4cd2-af6e-60a9a96a4d66" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.105564 4940 generic.go:334] "Generic (PLEG): container finished" podID="e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4" containerID="b9f8f099156e231d8e82a0d4acde2d14b09f087a6cf0a126e58ced6fbea9dae1" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.105630 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" event={"ID":"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4","Type":"ContainerDied","Data":"b9f8f099156e231d8e82a0d4acde2d14b09f087a6cf0a126e58ced6fbea9dae1"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.106165 4940 scope.go:117] "RemoveContainer" containerID="b9f8f099156e231d8e82a0d4acde2d14b09f087a6cf0a126e58ced6fbea9dae1" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.106413 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-jxxwj_openstack-operators(e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" podUID="e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.114252 4940 generic.go:334] "Generic (PLEG): container finished" podID="86296495-65bc-46a3-a775-621a7bf1745f" containerID="4bc9e973f54133c60a6bd459b6698a4ed7eef8ce408382a226113cb0208a53e2" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.114330 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" event={"ID":"86296495-65bc-46a3-a775-621a7bf1745f","Type":"ContainerDied","Data":"4bc9e973f54133c60a6bd459b6698a4ed7eef8ce408382a226113cb0208a53e2"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.114901 4940 scope.go:117] "RemoveContainer" containerID="4bc9e973f54133c60a6bd459b6698a4ed7eef8ce408382a226113cb0208a53e2" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.115161 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager 
pod=barbican-operator-controller-manager-7b64f4fb85-hxbtm_openstack-operators(86296495-65bc-46a3-a775-621a7bf1745f)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" podUID="86296495-65bc-46a3-a775-621a7bf1745f" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.117118 4940 generic.go:334] "Generic (PLEG): container finished" podID="2b2e7f46-8ad4-4361-8e95-76aa1e091665" containerID="5ef65377077da8c4fe707e50d1a6bd73746253ef84de16f52493d676ac59e262" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.117182 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" event={"ID":"2b2e7f46-8ad4-4361-8e95-76aa1e091665","Type":"ContainerDied","Data":"5ef65377077da8c4fe707e50d1a6bd73746253ef84de16f52493d676ac59e262"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.117837 4940 scope.go:117] "RemoveContainer" containerID="5ef65377077da8c4fe707e50d1a6bd73746253ef84de16f52493d676ac59e262" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.118050 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-x9ffm_openstack-operators(2b2e7f46-8ad4-4361-8e95-76aa1e091665)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" podUID="2b2e7f46-8ad4-4361-8e95-76aa1e091665" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.119216 4940 generic.go:334] "Generic (PLEG): container finished" podID="68cd61b8-efae-4aef-bd7a-3e90201b5809" containerID="d36bc06cba1e5be4028d78492a3e4374456c16acd82e71d9f36c6c42021bfad6" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.119252 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" event={"ID":"68cd61b8-efae-4aef-bd7a-3e90201b5809","Type":"ContainerDied","Data":"d36bc06cba1e5be4028d78492a3e4374456c16acd82e71d9f36c6c42021bfad6"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.119845 4940 scope.go:117] "RemoveContainer" containerID="d36bc06cba1e5be4028d78492a3e4374456c16acd82e71d9f36c6c42021bfad6" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.123335 4940 generic.go:334] "Generic (PLEG): container finished" podID="5ac0ef91-42dc-4bed-b5bc-4c668b3249cc" containerID="4a27642782b2e952a0f5370d55ba11f8ec629e7e6052c86858b54c7ee7a85af9" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.123388 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" event={"ID":"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc","Type":"ContainerDied","Data":"4a27642782b2e952a0f5370d55ba11f8ec629e7e6052c86858b54c7ee7a85af9"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.123793 4940 scope.go:117] "RemoveContainer" containerID="4a27642782b2e952a0f5370d55ba11f8ec629e7e6052c86858b54c7ee7a85af9" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.125363 4940 generic.go:334] "Generic (PLEG): container finished" podID="b805b33b-94ee-4037-907b-339573471ddb" containerID="dae30e891806b3836d03aea29e21b4cac7c6e521846ab376bbffa3ac6c9b6acf" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.125438 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" 
event={"ID":"b805b33b-94ee-4037-907b-339573471ddb","Type":"ContainerDied","Data":"dae30e891806b3836d03aea29e21b4cac7c6e521846ab376bbffa3ac6c9b6acf"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.125779 4940 scope.go:117] "RemoveContainer" containerID="dae30e891806b3836d03aea29e21b4cac7c6e521846ab376bbffa3ac6c9b6acf" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.126004 4940 scope.go:117] "RemoveContainer" containerID="5f4d42347e6909bca9c12b0af8c30a53b6a069559b2ae18120a71d8c27081a74" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.131511 4940 generic.go:334] "Generic (PLEG): container finished" podID="7fcb4d96-f7a7-4ead-a820-db2eb2785a87" containerID="dd335cbc7ae6c9b481eedac74682490a610f90013f79eb0a568075def00a2b2a" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.131570 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" event={"ID":"7fcb4d96-f7a7-4ead-a820-db2eb2785a87","Type":"ContainerDied","Data":"dd335cbc7ae6c9b481eedac74682490a610f90013f79eb0a568075def00a2b2a"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.131914 4940 scope.go:117] "RemoveContainer" containerID="dd335cbc7ae6c9b481eedac74682490a610f90013f79eb0a568075def00a2b2a" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.134020 4940 generic.go:334] "Generic (PLEG): container finished" podID="9527c833-8bce-440b-b4e5-ca0a08ef7d28" containerID="620e08f281a32a6ef218ad872543359ed7886bba940b837e8009912ee4416c94" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.134104 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" event={"ID":"9527c833-8bce-440b-b4e5-ca0a08ef7d28","Type":"ContainerDied","Data":"620e08f281a32a6ef218ad872543359ed7886bba940b837e8009912ee4416c94"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.134677 4940 scope.go:117] "RemoveContainer" containerID="620e08f281a32a6ef218ad872543359ed7886bba940b837e8009912ee4416c94" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.134974 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-6qpkm_openstack-operators(9527c833-8bce-440b-b4e5-ca0a08ef7d28)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" podUID="9527c833-8bce-440b-b4e5-ca0a08ef7d28" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.137552 4940 generic.go:334] "Generic (PLEG): container finished" podID="7ce6057b-0d67-48fc-9d34-b6574eda6978" containerID="00848784790fb5cffd7afdd96fd26718effdb57fc586df6db39ff156631e937f" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.137603 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" event={"ID":"7ce6057b-0d67-48fc-9d34-b6574eda6978","Type":"ContainerDied","Data":"00848784790fb5cffd7afdd96fd26718effdb57fc586df6db39ff156631e937f"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.137934 4940 scope.go:117] "RemoveContainer" containerID="00848784790fb5cffd7afdd96fd26718effdb57fc586df6db39ff156631e937f" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.138162 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s 
restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-d75td_openstack-operators(7ce6057b-0d67-48fc-9d34-b6574eda6978)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" podUID="7ce6057b-0d67-48fc-9d34-b6574eda6978" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.140763 4940 generic.go:334] "Generic (PLEG): container finished" podID="7756325b-5cc4-4eb6-ae14-5f71924c3413" containerID="1368b719242c4ac4a498216d0acc1664dd0f317ddd6f17bc70d5ea6694d82f50" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.140826 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" event={"ID":"7756325b-5cc4-4eb6-ae14-5f71924c3413","Type":"ContainerDied","Data":"1368b719242c4ac4a498216d0acc1664dd0f317ddd6f17bc70d5ea6694d82f50"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.141189 4940 scope.go:117] "RemoveContainer" containerID="1368b719242c4ac4a498216d0acc1664dd0f317ddd6f17bc70d5ea6694d82f50" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.141390 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-2ddr6_openstack-operators(7756325b-5cc4-4eb6-ae14-5f71924c3413)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" podUID="7756325b-5cc4-4eb6-ae14-5f71924c3413" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.155255 4940 generic.go:334] "Generic (PLEG): container finished" podID="67bde2c7-9e64-469e-b400-071b32f065da" containerID="cf331a1e77aa5c0a7b311d69dee6ab4647bcbf431060ebf2435b3dd074d06258" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.155348 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" event={"ID":"67bde2c7-9e64-469e-b400-071b32f065da","Type":"ContainerDied","Data":"cf331a1e77aa5c0a7b311d69dee6ab4647bcbf431060ebf2435b3dd074d06258"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.165156 4940 scope.go:117] "RemoveContainer" containerID="cf331a1e77aa5c0a7b311d69dee6ab4647bcbf431060ebf2435b3dd074d06258" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.167685 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-vcr8t_openstack-operators(67bde2c7-9e64-469e-b400-071b32f065da)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" podUID="67bde2c7-9e64-469e-b400-071b32f065da" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.176219 4940 generic.go:334] "Generic (PLEG): container finished" podID="51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5" containerID="7533bf72de85df68f016e542a10b7770d422cc6bd8cd651de41d854aaa0b3eb0" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.176373 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" event={"ID":"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5","Type":"ContainerDied","Data":"7533bf72de85df68f016e542a10b7770d422cc6bd8cd651de41d854aaa0b3eb0"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.177308 4940 scope.go:117] "RemoveContainer" 
containerID="7533bf72de85df68f016e542a10b7770d422cc6bd8cd651de41d854aaa0b3eb0" Nov 26 07:18:48 crc kubenswrapper[4940]: E1126 07:18:48.177927 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-s2cvl_openstack-operators(51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" podUID="51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.212740 4940 generic.go:334] "Generic (PLEG): container finished" podID="5c068c7e-f13c-45ca-b161-e590eefdd568" containerID="2e2fef8e305481c0f31aa7530decb9f6de8e189b3c14190ad8d0f07303323c0f" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.212979 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" event={"ID":"5c068c7e-f13c-45ca-b161-e590eefdd568","Type":"ContainerDied","Data":"2e2fef8e305481c0f31aa7530decb9f6de8e189b3c14190ad8d0f07303323c0f"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.213450 4940 scope.go:117] "RemoveContainer" containerID="2e2fef8e305481c0f31aa7530decb9f6de8e189b3c14190ad8d0f07303323c0f" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.218414 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" event={"ID":"192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea","Type":"ContainerDied","Data":"df807c7ebe9dbedf3d4860022ba1efc7abaeb3f07501f30542d513f336d18784"} Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.218921 4940 scope.go:117] "RemoveContainer" containerID="df807c7ebe9dbedf3d4860022ba1efc7abaeb3f07501f30542d513f336d18784" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.219279 4940 generic.go:334] "Generic (PLEG): container finished" podID="192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea" containerID="df807c7ebe9dbedf3d4860022ba1efc7abaeb3f07501f30542d513f336d18784" exitCode=1 Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.257632 4940 scope.go:117] "RemoveContainer" containerID="179f7cba17ee6f72630f8e9d0839668d4b1fc29ece555b99ba134421ede5478d" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.334694 4940 scope.go:117] "RemoveContainer" containerID="a18a90bdf99dc5707aa80d198343866fac1bb9413286ee9c066621e591d80395" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.381399 4940 scope.go:117] "RemoveContainer" containerID="770f0e888ed9d736e24b973a0db645ac5c63d4728e3246d8f6213419e6924c37" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.423367 4940 scope.go:117] "RemoveContainer" containerID="f3b11de0cba5fabe5e4f8b771ef0ba514601283f5a893290ce592c51a22c14c2" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.449448 4940 scope.go:117] "RemoveContainer" containerID="6be974cfc983b06d8579e128e6bb85eceb785f44210e86cd7c82d9cdeed56b9f" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.476412 4940 scope.go:117] "RemoveContainer" containerID="f6a59ff24ca24b60c8df863f4660f2261fca6f6898f69caf7a39cd40ea7c334b" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.502603 4940 scope.go:117] "RemoveContainer" containerID="f537c33fd1350f4fc54f21bbf168f336ab2ca52bb8da5666d8584afe96ecb173" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.535091 4940 scope.go:117] "RemoveContainer" containerID="66e7036b9edbb973ac2dfbc3f9f84d9976a9419baa0b52a8f311c421d91c0fd1" Nov 
26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.556997 4940 scope.go:117] "RemoveContainer" containerID="9db78c16f5b35cf049201d82712db21f533c70a534a80df60cc4689a577b7131" Nov 26 07:18:48 crc kubenswrapper[4940]: I1126 07:18:48.574204 4940 scope.go:117] "RemoveContainer" containerID="2442ed10e98338fffcf8e1b946ea1b87654a5e145291979c0df657ceb62cb316" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.256118 4940 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="a139d93c-1e31-4f9a-9aff-136926e53860" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.256225 4940 generic.go:334] "Generic (PLEG): container finished" podID="5ac0ef91-42dc-4bed-b5bc-4c668b3249cc" containerID="d68bf5627d4dddece55104423d33c887377a03504588c8d1fa5c71e80e70a53b" exitCode=1 Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.256337 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" event={"ID":"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc","Type":"ContainerDied","Data":"d68bf5627d4dddece55104423d33c887377a03504588c8d1fa5c71e80e70a53b"} Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.256468 4940 scope.go:117] "RemoveContainer" containerID="4a27642782b2e952a0f5370d55ba11f8ec629e7e6052c86858b54c7ee7a85af9" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.257319 4940 scope.go:117] "RemoveContainer" containerID="d68bf5627d4dddece55104423d33c887377a03504588c8d1fa5c71e80e70a53b" Nov 26 07:18:49 crc kubenswrapper[4940]: E1126 07:18:49.257989 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-nwpjs_openstack-operators(5ac0ef91-42dc-4bed-b5bc-4c668b3249cc)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" podUID="5ac0ef91-42dc-4bed-b5bc-4c668b3249cc" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.277494 4940 generic.go:334] "Generic (PLEG): container finished" podID="68cd61b8-efae-4aef-bd7a-3e90201b5809" containerID="ba79d9254bec7d56b516b60c0d086f6554898beeced6c19f46d162cddb110d13" exitCode=1 Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.277620 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" event={"ID":"68cd61b8-efae-4aef-bd7a-3e90201b5809","Type":"ContainerDied","Data":"ba79d9254bec7d56b516b60c0d086f6554898beeced6c19f46d162cddb110d13"} Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.279853 4940 scope.go:117] "RemoveContainer" containerID="ba79d9254bec7d56b516b60c0d086f6554898beeced6c19f46d162cddb110d13" Nov 26 07:18:49 crc kubenswrapper[4940]: E1126 07:18:49.280431 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-qsrc5_openstack-operators(68cd61b8-efae-4aef-bd7a-3e90201b5809)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" podUID="68cd61b8-efae-4aef-bd7a-3e90201b5809" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.281092 4940 generic.go:334] "Generic (PLEG): container finished" podID="7fcb4d96-f7a7-4ead-a820-db2eb2785a87" 
containerID="43325540e0d3a4743aaaefc3c8f5ed3290dd693422af345cae4bd42a952a4b9b" exitCode=1 Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.281174 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" event={"ID":"7fcb4d96-f7a7-4ead-a820-db2eb2785a87","Type":"ContainerDied","Data":"43325540e0d3a4743aaaefc3c8f5ed3290dd693422af345cae4bd42a952a4b9b"} Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.281663 4940 scope.go:117] "RemoveContainer" containerID="43325540e0d3a4743aaaefc3c8f5ed3290dd693422af345cae4bd42a952a4b9b" Nov 26 07:18:49 crc kubenswrapper[4940]: E1126 07:18:49.282000 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-jgp88_openstack-operators(7fcb4d96-f7a7-4ead-a820-db2eb2785a87)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" podUID="7fcb4d96-f7a7-4ead-a820-db2eb2785a87" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.293954 4940 generic.go:334] "Generic (PLEG): container finished" podID="5c068c7e-f13c-45ca-b161-e590eefdd568" containerID="dc07dc321ceefa9f79f1390ba1d912bb5b3042fa9bb6bf70370fd83afefd0ad5" exitCode=1 Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.294027 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" event={"ID":"5c068c7e-f13c-45ca-b161-e590eefdd568","Type":"ContainerDied","Data":"dc07dc321ceefa9f79f1390ba1d912bb5b3042fa9bb6bf70370fd83afefd0ad5"} Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.294663 4940 scope.go:117] "RemoveContainer" containerID="dc07dc321ceefa9f79f1390ba1d912bb5b3042fa9bb6bf70370fd83afefd0ad5" Nov 26 07:18:49 crc kubenswrapper[4940]: E1126 07:18:49.294919 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-lzctw_openstack-operators(5c068c7e-f13c-45ca-b161-e590eefdd568)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" podUID="5c068c7e-f13c-45ca-b161-e590eefdd568" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.296448 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" event={"ID":"192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea","Type":"ContainerStarted","Data":"eb2c840b937ed5c0ce49b5745afd9330c5072213d5a1b30243fe7ebefc8a87fd"} Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.297070 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.305839 4940 generic.go:334] "Generic (PLEG): container finished" podID="b805b33b-94ee-4037-907b-339573471ddb" containerID="d16757cd44074c00433995bf440fe661edbbee2e301990826beef5a3a36fa129" exitCode=1 Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.305909 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" event={"ID":"b805b33b-94ee-4037-907b-339573471ddb","Type":"ContainerDied","Data":"d16757cd44074c00433995bf440fe661edbbee2e301990826beef5a3a36fa129"} Nov 26 07:18:49 crc 
kubenswrapper[4940]: I1126 07:18:49.306530 4940 scope.go:117] "RemoveContainer" containerID="d16757cd44074c00433995bf440fe661edbbee2e301990826beef5a3a36fa129" Nov 26 07:18:49 crc kubenswrapper[4940]: E1126 07:18:49.306812 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-txjbc_openstack-operators(b805b33b-94ee-4037-907b-339573471ddb)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" podUID="b805b33b-94ee-4037-907b-339573471ddb" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.308104 4940 generic.go:334] "Generic (PLEG): container finished" podID="e4255a56-ed59-4cad-90a4-91abb39144d4" containerID="a0797cbb3651c470f7c2c79cd72664b0ecf34cb8800ecb746a8e38816bc274d9" exitCode=1 Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.308206 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" event={"ID":"e4255a56-ed59-4cad-90a4-91abb39144d4","Type":"ContainerDied","Data":"a0797cbb3651c470f7c2c79cd72664b0ecf34cb8800ecb746a8e38816bc274d9"} Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.309457 4940 scope.go:117] "RemoveContainer" containerID="a0797cbb3651c470f7c2c79cd72664b0ecf34cb8800ecb746a8e38816bc274d9" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.344680 4940 scope.go:117] "RemoveContainer" containerID="d36bc06cba1e5be4028d78492a3e4374456c16acd82e71d9f36c6c42021bfad6" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.437707 4940 scope.go:117] "RemoveContainer" containerID="dd335cbc7ae6c9b481eedac74682490a610f90013f79eb0a568075def00a2b2a" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.487654 4940 scope.go:117] "RemoveContainer" containerID="2e2fef8e305481c0f31aa7530decb9f6de8e189b3c14190ad8d0f07303323c0f" Nov 26 07:18:49 crc kubenswrapper[4940]: I1126 07:18:49.516643 4940 scope.go:117] "RemoveContainer" containerID="dae30e891806b3836d03aea29e21b4cac7c6e521846ab376bbffa3ac6c9b6acf" Nov 26 07:18:50 crc kubenswrapper[4940]: I1126 07:18:50.330556 4940 generic.go:334] "Generic (PLEG): container finished" podID="e4255a56-ed59-4cad-90a4-91abb39144d4" containerID="a883a9ea21732e44c8091c724996b119c4a80b3e69f322f876176b5133cf07de" exitCode=1 Nov 26 07:18:50 crc kubenswrapper[4940]: I1126 07:18:50.331171 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" event={"ID":"e4255a56-ed59-4cad-90a4-91abb39144d4","Type":"ContainerDied","Data":"a883a9ea21732e44c8091c724996b119c4a80b3e69f322f876176b5133cf07de"} Nov 26 07:18:50 crc kubenswrapper[4940]: I1126 07:18:50.331819 4940 scope.go:117] "RemoveContainer" containerID="a0797cbb3651c470f7c2c79cd72664b0ecf34cb8800ecb746a8e38816bc274d9" Nov 26 07:18:50 crc kubenswrapper[4940]: I1126 07:18:50.332321 4940 scope.go:117] "RemoveContainer" containerID="a883a9ea21732e44c8091c724996b119c4a80b3e69f322f876176b5133cf07de" Nov 26 07:18:50 crc kubenswrapper[4940]: E1126 07:18:50.332552 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-xbbg2_openstack-operators(e4255a56-ed59-4cad-90a4-91abb39144d4)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" 
podUID="e4255a56-ed59-4cad-90a4-91abb39144d4" Nov 26 07:18:51 crc kubenswrapper[4940]: I1126 07:18:51.177830 4940 scope.go:117] "RemoveContainer" containerID="bba1a12c3448d128ce989f7b0c72c53c7d0de1d5769bc2644482d3a2034f39f3" Nov 26 07:18:51 crc kubenswrapper[4940]: I1126 07:18:51.319545 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp" Nov 26 07:18:52 crc kubenswrapper[4940]: I1126 07:18:52.363562 4940 generic.go:334] "Generic (PLEG): container finished" podID="4415b953-7e66-4d84-acde-32474c6d0ebf" containerID="3982a13d0a7f3946864114dbacc34bd5febaa788dc98baf94f62d8fdd739c0f3" exitCode=1 Nov 26 07:18:52 crc kubenswrapper[4940]: I1126 07:18:52.363637 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" event={"ID":"4415b953-7e66-4d84-acde-32474c6d0ebf","Type":"ContainerDied","Data":"3982a13d0a7f3946864114dbacc34bd5febaa788dc98baf94f62d8fdd739c0f3"} Nov 26 07:18:52 crc kubenswrapper[4940]: I1126 07:18:52.364610 4940 scope.go:117] "RemoveContainer" containerID="bba1a12c3448d128ce989f7b0c72c53c7d0de1d5769bc2644482d3a2034f39f3" Nov 26 07:18:52 crc kubenswrapper[4940]: I1126 07:18:52.365475 4940 scope.go:117] "RemoveContainer" containerID="3982a13d0a7f3946864114dbacc34bd5febaa788dc98baf94f62d8fdd739c0f3" Nov 26 07:18:52 crc kubenswrapper[4940]: E1126 07:18:52.366140 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-6f8d6cc986-tlmk2_metallb-system(4415b953-7e66-4d84-acde-32474c6d0ebf)\"" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" Nov 26 07:18:54 crc kubenswrapper[4940]: I1126 07:18:54.904226 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:18:54 crc kubenswrapper[4940]: I1126 07:18:54.905740 4940 scope.go:117] "RemoveContainer" containerID="00848784790fb5cffd7afdd96fd26718effdb57fc586df6db39ff156631e937f" Nov 26 07:18:54 crc kubenswrapper[4940]: E1126 07:18:54.906331 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-d75td_openstack-operators(7ce6057b-0d67-48fc-9d34-b6574eda6978)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" podUID="7ce6057b-0d67-48fc-9d34-b6574eda6978" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.010202 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.010875 4940 scope.go:117] "RemoveContainer" containerID="5bf48035f1dc081adc3b475f5ac9c0f42e7241c3239112cfc3df0c22ab9b3ca2" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.011206 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-97dsx_openstack-operators(b53f82af-849a-47b4-a878-676055ad11ef)\"" 
pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" podUID="b53f82af-849a-47b4-a878-676055ad11ef" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.195296 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.196248 4940 scope.go:117] "RemoveContainer" containerID="4bc9e973f54133c60a6bd459b6698a4ed7eef8ce408382a226113cb0208a53e2" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.196641 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-hxbtm_openstack-operators(86296495-65bc-46a3-a775-621a7bf1745f)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" podUID="86296495-65bc-46a3-a775-621a7bf1745f" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.217922 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.218544 4940 scope.go:117] "RemoveContainer" containerID="620e08f281a32a6ef218ad872543359ed7886bba940b837e8009912ee4416c94" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.218778 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-6qpkm_openstack-operators(9527c833-8bce-440b-b4e5-ca0a08ef7d28)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" podUID="9527c833-8bce-440b-b4e5-ca0a08ef7d28" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.245126 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.246109 4940 scope.go:117] "RemoveContainer" containerID="60be7d9a7140bc14412456bd45e1620a443c10884f02b8eb806bb6080c6608d2" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.246644 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-55vwf_openstack-operators(327827f9-8ca3-4d2e-8478-ace9eb784b21)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" podUID="327827f9-8ca3-4d2e-8478-ace9eb784b21" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.273715 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.274748 4940 scope.go:117] "RemoveContainer" containerID="7533bf72de85df68f016e542a10b7770d422cc6bd8cd651de41d854aaa0b3eb0" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.275266 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-s2cvl_openstack-operators(51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5)\"" 
pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" podUID="51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.294770 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.295624 4940 scope.go:117] "RemoveContainer" containerID="a883a9ea21732e44c8091c724996b119c4a80b3e69f322f876176b5133cf07de" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.295919 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-xbbg2_openstack-operators(e4255a56-ed59-4cad-90a4-91abb39144d4)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" podUID="e4255a56-ed59-4cad-90a4-91abb39144d4" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.321088 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.321759 4940 scope.go:117] "RemoveContainer" containerID="ba79d9254bec7d56b516b60c0d086f6554898beeced6c19f46d162cddb110d13" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.322106 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-qsrc5_openstack-operators(68cd61b8-efae-4aef-bd7a-3e90201b5809)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" podUID="68cd61b8-efae-4aef-bd7a-3e90201b5809" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.340513 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.341193 4940 scope.go:117] "RemoveContainer" containerID="866f583b325e790dfade7bcadcc88883562243a5f772f9878e2b3321b4a16d09" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.341515 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-qwlpn_openstack-operators(e46c7b1d-e02f-4807-a650-1038eba64162)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" podUID="e46c7b1d-e02f-4807-a650-1038eba64162" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.354749 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.355520 4940 scope.go:117] "RemoveContainer" containerID="dc07dc321ceefa9f79f1390ba1d912bb5b3042fa9bb6bf70370fd83afefd0ad5" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.356079 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-lzctw_openstack-operators(5c068c7e-f13c-45ca-b161-e590eefdd568)\"" 
pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" podUID="5c068c7e-f13c-45ca-b161-e590eefdd568" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.366742 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.367379 4940 scope.go:117] "RemoveContainer" containerID="5ef65377077da8c4fe707e50d1a6bd73746253ef84de16f52493d676ac59e262" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.367595 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-x9ffm_openstack-operators(2b2e7f46-8ad4-4361-8e95-76aa1e091665)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" podUID="2b2e7f46-8ad4-4361-8e95-76aa1e091665" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.377875 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.378526 4940 scope.go:117] "RemoveContainer" containerID="eb2c0e24126f4a93298401a8ddd378e90fedcb87cd4e47890243297f16ed4f9a" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.378737 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-ht6zr_openstack-operators(0eca9bcc-5909-48e2-927e-b059359977d5)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.382205 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.384588 4940 scope.go:117] "RemoveContainer" containerID="c197f7b26e412ffceddb13315c7001ac431ad74018789b7199e6b9622a752663" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.384843 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-5p8hf_openstack-operators(15d11cf9-51e8-4f1e-880e-86d9bba60224)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.400710 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.401284 4940 scope.go:117] "RemoveContainer" containerID="b9f8f099156e231d8e82a0d4acde2d14b09f087a6cf0a126e58ced6fbea9dae1" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.401477 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-jxxwj_openstack-operators(e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4)\"" 
pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" podUID="e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.425423 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.426247 4940 scope.go:117] "RemoveContainer" containerID="1368b719242c4ac4a498216d0acc1664dd0f317ddd6f17bc70d5ea6694d82f50" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.426540 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-2ddr6_openstack-operators(7756325b-5cc4-4eb6-ae14-5f71924c3413)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" podUID="7756325b-5cc4-4eb6-ae14-5f71924c3413" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.441137 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.441720 4940 scope.go:117] "RemoveContainer" containerID="d68bf5627d4dddece55104423d33c887377a03504588c8d1fa5c71e80e70a53b" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.441941 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-nwpjs_openstack-operators(5ac0ef91-42dc-4bed-b5bc-4c668b3249cc)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" podUID="5ac0ef91-42dc-4bed-b5bc-4c668b3249cc" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.468708 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.469281 4940 scope.go:117] "RemoveContainer" containerID="43325540e0d3a4743aaaefc3c8f5ed3290dd693422af345cae4bd42a952a4b9b" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.469473 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-jgp88_openstack-operators(7fcb4d96-f7a7-4ead-a820-db2eb2785a87)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" podUID="7fcb4d96-f7a7-4ead-a820-db2eb2785a87" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.544484 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-4j248" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.676729 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.677404 4940 scope.go:117] "RemoveContainer" containerID="cf331a1e77aa5c0a7b311d69dee6ab4647bcbf431060ebf2435b3dd074d06258" Nov 26 07:18:55 crc kubenswrapper[4940]: E1126 07:18:55.677698 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: 
\"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-vcr8t_openstack-operators(67bde2c7-9e64-469e-b400-071b32f065da)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" podUID="67bde2c7-9e64-469e-b400-071b32f065da" Nov 26 07:18:55 crc kubenswrapper[4940]: I1126 07:18:55.978187 4940 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.166731 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.489098 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.579841 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-4l8m6" Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.668789 4940 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-2zs7c" Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.776750 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.830634 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.830723 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.831693 4940 scope.go:117] "RemoveContainer" containerID="d16757cd44074c00433995bf440fe661edbbee2e301990826beef5a3a36fa129" Nov 26 07:18:56 crc kubenswrapper[4940]: E1126 07:18:56.832182 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-txjbc_openstack-operators(b805b33b-94ee-4037-907b-339573471ddb)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" podUID="b805b33b-94ee-4037-907b-339573471ddb" Nov 26 07:18:56 crc kubenswrapper[4940]: I1126 07:18:56.978713 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 07:18:57 crc kubenswrapper[4940]: I1126 07:18:57.153138 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 07:18:57 crc kubenswrapper[4940]: I1126 07:18:57.162331 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 07:18:57 crc kubenswrapper[4940]: I1126 07:18:57.395321 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 07:18:57 crc kubenswrapper[4940]: I1126 07:18:57.414784 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-49nb9" Nov 26 07:18:57 crc kubenswrapper[4940]: I1126 
Nov 26 07:18:57 crc kubenswrapper[4940]: I1126 07:18:57.576742 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp"
Nov 26 07:18:57 crc kubenswrapper[4940]: I1126 07:18:57.577870 4940 scope.go:117] "RemoveContainer" containerID="7fd50f7016eb2306511a570e0fd391c32c2de106f6fd5c7f0f303eba054bc644"
Nov 26 07:18:57 crc kubenswrapper[4940]: I1126 07:18:57.956297 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.051798 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.087111 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.164362 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.195029 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.198305 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.241336 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.298360 4940 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.340371 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.360502 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.371188 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.425101 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.429820 4940 generic.go:334] "Generic (PLEG): container finished" podID="df395369-43ff-4cd2-af6e-60a9a96a4d66" containerID="cdec025dbea529fed26334e8de3a917e571633b422993ee659d8489bf630449f" exitCode=1
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.429893 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" event={"ID":"df395369-43ff-4cd2-af6e-60a9a96a4d66","Type":"ContainerDied","Data":"cdec025dbea529fed26334e8de3a917e571633b422993ee659d8489bf630449f"}
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.429947 4940 scope.go:117] "RemoveContainer" containerID="7fd50f7016eb2306511a570e0fd391c32c2de106f6fd5c7f0f303eba054bc644"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.430812 4940 scope.go:117] "RemoveContainer" containerID="cdec025dbea529fed26334e8de3a917e571633b422993ee659d8489bf630449f"
Nov 26 07:18:58 crc kubenswrapper[4940]: E1126 07:18:58.431295 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-7dkrp_openstack-operators(df395369-43ff-4cd2-af6e-60a9a96a4d66)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" podUID="df395369-43ff-4cd2-af6e-60a9a96a4d66"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.550485 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-xb29d"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.553080 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.605907 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.670793 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.834879 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.949381 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 26 07:18:58 crc kubenswrapper[4940]: I1126 07:18:58.964824 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.012955 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.098666 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.200200 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.214254 4940 scope.go:117] "RemoveContainer" containerID="ac48e187fbac899a570e74f5231f7277c945f7367cd10441dd2d9c1dc5e9497f"
Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.283560 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.379202 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.391323 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.419745 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
event={"ID":"e8c9bb46-a618-437a-914b-6cb9c1ede58c","Type":"ContainerStarted","Data":"43d797de95d39276f3dc8e4089f59f09905136f0994600a8f3be9366d2744f1e"} Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.447975 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.531492 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.533270 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.558924 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.566334 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.568398 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-58mnz" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.590356 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.670401 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.697342 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-qm25m" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.752951 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.767693 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.791404 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.860287 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-mjg56" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.861341 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.895835 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.920635 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.960851 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.961111 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:18:59 crc kubenswrapper[4940]: I1126 07:18:59.962018 4940 scope.go:117] "RemoveContainer" containerID="3982a13d0a7f3946864114dbacc34bd5febaa788dc98baf94f62d8fdd739c0f3" Nov 26 07:18:59 crc kubenswrapper[4940]: E1126 07:18:59.962444 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-6f8d6cc986-tlmk2_metallb-system(4415b953-7e66-4d84-acde-32474c6d0ebf)\"" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" podUID="4415b953-7e66-4d84-acde-32474c6d0ebf" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.012799 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.139966 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.201726 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.215073 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.275158 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.373260 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.386514 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-kdtmq" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.391242 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.459738 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.554722 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.664352 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.705507 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.797327 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.840676 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.929964 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.956519 4940 
Nov 26 07:19:00 crc kubenswrapper[4940]: I1126 07:19:00.956519 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.010658 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.031001 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.059131 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.183896 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.205856 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-zt67h"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.220529 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.236468 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.261282 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.380889 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.383942 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.422803 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.430779 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-qf88j"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.454669 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-wtdks"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.462727 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.513674 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.585482 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.633441 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.704243 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-9prwq"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.825150 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.837535 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.879261 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.914450 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.957678 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 26 07:19:01 crc kubenswrapper[4940]: I1126 07:19:01.988258 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.008096 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.082952 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.110821 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-55fx6"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.208798 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.398421 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.411311 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.470310 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-gmc8n"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.486618 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.508583 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.533065 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-5552r"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.538021 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.544120 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
object-"openstack-operators"/"openshift-service-ca.crt" Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.670794 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.735908 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.805888 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.818755 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-fd8hz" Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.820932 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.872504 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.912880 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-jmwzs" Nov 26 07:19:02 crc kubenswrapper[4940]: I1126 07:19:02.981746 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.000737 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.039112 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.045469 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.071933 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.139279 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.161300 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.187007 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.307148 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.307160 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.308586 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.351823 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.390439 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.447813 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-xxxkf"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.457184 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.474636 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.595116 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-fxhxc"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.599713 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.602942 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.608887 4940 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.628127 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.698576 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.755353 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.781067 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.813246 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.856195 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.871770 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.876267 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.917023 4940 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.921802 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.928666 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.932553 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 26 07:19:03 crc kubenswrapper[4940]: I1126 07:19:03.980465 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.070434 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.074723 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.075830 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.212795 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.237396 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.379991 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.399790 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.404012 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.457095 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.569595 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.578062 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.628716 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.692119 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.714677 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.724021 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.750490 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-9kxgd"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.767873 4940 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-hzlmb"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.770863 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.903303 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.903902 4940 scope.go:117] "RemoveContainer" containerID="00848784790fb5cffd7afdd96fd26718effdb57fc586df6db39ff156631e937f"
Nov 26 07:19:04 crc kubenswrapper[4940]: I1126 07:19:04.983737 4940 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-vgvsv"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.010503 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.011534 4940 scope.go:117] "RemoveContainer" containerID="5bf48035f1dc081adc3b475f5ac9c0f42e7241c3239112cfc3df0c22ab9b3ca2"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.032772 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.041100 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.059843 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.075264 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.075467 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.150435 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.195513 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.196210 4940 scope.go:117] "RemoveContainer" containerID="4bc9e973f54133c60a6bd459b6698a4ed7eef8ce408382a226113cb0208a53e2"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.218689 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.219501 4940 scope.go:117] "RemoveContainer" containerID="620e08f281a32a6ef218ad872543359ed7886bba940b837e8009912ee4416c94"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.244677 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.245372 4940 scope.go:117] "RemoveContainer" containerID="60be7d9a7140bc14412456bd45e1620a443c10884f02b8eb806bb6080c6608d2"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.273844 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.274637 4940 scope.go:117] "RemoveContainer" containerID="7533bf72de85df68f016e542a10b7770d422cc6bd8cd651de41d854aaa0b3eb0"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.294129 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.294305 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.294821 4940 scope.go:117] "RemoveContainer" containerID="a883a9ea21732e44c8091c724996b119c4a80b3e69f322f876176b5133cf07de"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.321385 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.321920 4940 scope.go:117] "RemoveContainer" containerID="ba79d9254bec7d56b516b60c0d086f6554898beeced6c19f46d162cddb110d13"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.340335 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.341311 4940 scope.go:117] "RemoveContainer" containerID="866f583b325e790dfade7bcadcc88883562243a5f772f9878e2b3321b4a16d09"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.354324 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.360187 4940 scope.go:117] "RemoveContainer" containerID="dc07dc321ceefa9f79f1390ba1d912bb5b3042fa9bb6bf70370fd83afefd0ad5"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.366290 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.367351 4940 scope.go:117] "RemoveContainer" containerID="5ef65377077da8c4fe707e50d1a6bd73746253ef84de16f52493d676ac59e262"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.377154 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr"
Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.379591 4940 scope.go:117] "RemoveContainer" containerID="eb2c0e24126f4a93298401a8ddd378e90fedcb87cd4e47890243297f16ed4f9a"
pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" podUID="0eca9bcc-5909-48e2-927e-b059359977d5" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.382675 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.384959 4940 scope.go:117] "RemoveContainer" containerID="c197f7b26e412ffceddb13315c7001ac431ad74018789b7199e6b9622a752663" Nov 26 07:19:05 crc kubenswrapper[4940]: E1126 07:19:05.385587 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-5p8hf_openstack-operators(15d11cf9-51e8-4f1e-880e-86d9bba60224)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" podUID="15d11cf9-51e8-4f1e-880e-86d9bba60224" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.400270 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.400895 4940 scope.go:117] "RemoveContainer" containerID="b9f8f099156e231d8e82a0d4acde2d14b09f087a6cf0a126e58ced6fbea9dae1" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.416801 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.424983 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.425881 4940 scope.go:117] "RemoveContainer" containerID="1368b719242c4ac4a498216d0acc1664dd0f317ddd6f17bc70d5ea6694d82f50" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.441101 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.441947 4940 scope.go:117] "RemoveContainer" containerID="d68bf5627d4dddece55104423d33c887377a03504588c8d1fa5c71e80e70a53b" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.468431 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.469164 4940 scope.go:117] "RemoveContainer" containerID="43325540e0d3a4743aaaefc3c8f5ed3290dd693422af345cae4bd42a952a4b9b" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.516122 4940 generic.go:334] "Generic (PLEG): container finished" podID="3632747e-e8d6-4971-a5bf-d07117d69ae7" containerID="9ac83e827985174b813db315c23840531ce688342e12957a0a195482eb3f37f3" exitCode=1 Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.516178 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" event={"ID":"3632747e-e8d6-4971-a5bf-d07117d69ae7","Type":"ContainerDied","Data":"9ac83e827985174b813db315c23840531ce688342e12957a0a195482eb3f37f3"} Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.516765 4940 scope.go:117] "RemoveContainer" 
containerID="9ac83e827985174b813db315c23840531ce688342e12957a0a195482eb3f37f3" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.520833 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" event={"ID":"7ce6057b-0d67-48fc-9d34-b6574eda6978","Type":"ContainerStarted","Data":"981baf4ef2d0e8b8b9a9d958ed2cca0d4707dc26b0a8609860256a7bad0fbd18"} Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.521323 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.524226 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-m2568" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.536406 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" event={"ID":"b53f82af-849a-47b4-a878-676055ad11ef","Type":"ContainerStarted","Data":"9404fa7d89931394263ef16809add267a243348cbe82deda2a98d6a604744b22"} Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.537207 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.549613 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.551565 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.601329 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.651791 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.677583 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.678570 4940 scope.go:117] "RemoveContainer" containerID="cf331a1e77aa5c0a7b311d69dee6ab4647bcbf431060ebf2435b3dd074d06258" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.689992 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-gmxv9" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.707534 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.717680 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.728949 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.762629 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.767257 4940 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.855598 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.877141 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.923621 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.977484 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 07:19:05 crc kubenswrapper[4940]: I1126 07:19:05.996407 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.042661 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.052415 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.127983 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.149399 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.154227 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.169550 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.196275 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-d2d59" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.206810 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.303424 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.305560 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.315719 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.351264 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.374622 4940 reflector.go:368] Caches populated for *v1.Secret from 
object-"metallb-system"/"metallb-webhook-cert" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.449425 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.488601 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.574025 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-pjqx6" event={"ID":"3632747e-e8d6-4971-a5bf-d07117d69ae7","Type":"ContainerStarted","Data":"ec3a437bd4de8421919b1ba8333aba6906fda861836e000cc86e0ec40f240cc3"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.576274 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" event={"ID":"e46c7b1d-e02f-4807-a650-1038eba64162","Type":"ContainerStarted","Data":"6b12c5fdb3e3639ebe9da845ddd74cf311c1dad7181df70539d859ab7ca0947f"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.577238 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.578584 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" event={"ID":"9527c833-8bce-440b-b4e5-ca0a08ef7d28","Type":"ContainerStarted","Data":"3940bb1b95d1e45b7107efcfcd0874d7bb7c2e2b902faaf1d48b1f3ccb6f3a2b"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.578979 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.590325 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" event={"ID":"5c068c7e-f13c-45ca-b161-e590eefdd568","Type":"ContainerStarted","Data":"e17a09b289e47f391e072b95ea0de0214eea483e60aab3067bb866c11a3d5fef"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.590958 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.594932 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" event={"ID":"5ac0ef91-42dc-4bed-b5bc-4c668b3249cc","Type":"ContainerStarted","Data":"eadcf88808ec1365da516495b43e69d2f8fa123274102b3ed72f23b316703fd9"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.595194 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.598581 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" event={"ID":"67bde2c7-9e64-469e-b400-071b32f065da","Type":"ContainerStarted","Data":"adb0ce50278ae0001dcd21f6be6e308dfd085cc8fcbd91ffab413b0aeab66195"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.599234 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:19:06 crc kubenswrapper[4940]: 
I1126 07:19:06.614676 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" event={"ID":"7fcb4d96-f7a7-4ead-a820-db2eb2785a87","Type":"ContainerStarted","Data":"168b7b5a2d3c17968a8f8be4aadcc99d9a4e13f5bfd376ecc8c3438c2d9a688a"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.615375 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.640708 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" event={"ID":"51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5","Type":"ContainerStarted","Data":"7f17f55f6944a88507054a8ca032efb778fc7520d229475e54a6776bb2dc30bb"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.641411 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.643367 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" event={"ID":"327827f9-8ca3-4d2e-8478-ace9eb784b21","Type":"ContainerStarted","Data":"8f6d95f5a293eba6b2c549c71b0f7107c59fcfc6722a61b735baceb37266dec6"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.643917 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.666251 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" event={"ID":"e4255a56-ed59-4cad-90a4-91abb39144d4","Type":"ContainerStarted","Data":"952c8620c423054090b47def3b92407eba810eadf1fdb83f22f8c907b5e944fa"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.666700 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.669704 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" event={"ID":"e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4","Type":"ContainerStarted","Data":"bcbe669edab4927e41fd8a5b18690cc3e6c0f36093097ad49d0abcc0b744225d"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.670284 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.673013 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" event={"ID":"86296495-65bc-46a3-a775-621a7bf1745f","Type":"ContainerStarted","Data":"15a245e16fb0d4007b6e0fec1562fe8d9f6c37d8a39919bd5aa39b0c7e10c4bf"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.673401 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.675848 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" 
event={"ID":"2b2e7f46-8ad4-4361-8e95-76aa1e091665","Type":"ContainerStarted","Data":"eea734f36c4aba6108d24cebd516e9c19cc70cd8cd79a3182c1f012b189f92ff"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.676245 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.689892 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" event={"ID":"68cd61b8-efae-4aef-bd7a-3e90201b5809","Type":"ContainerStarted","Data":"679a330081e7d6a162d8f03a5182ffb85b095a116bb49e79db41197e784e054a"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.690433 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.711026 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" event={"ID":"7756325b-5cc4-4eb6-ae14-5f71924c3413","Type":"ContainerStarted","Data":"264004d4a29eddab202d4c6312da614c965a83ef46ef7d878e30909c2ffb2a5d"} Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.711409 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.717291 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.808254 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.967438 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.983409 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 07:19:06 crc kubenswrapper[4940]: I1126 07:19:06.997670 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.016651 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.075206 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.170508 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.191105 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.265422 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.306574 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.307808 4940 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.324273 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.412632 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.454694 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.464636 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.537273 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.561749 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.562293 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.562610 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.576798 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.577426 4940 scope.go:117] "RemoveContainer" containerID="cdec025dbea529fed26334e8de3a917e571633b422993ee659d8489bf630449f" Nov 26 07:19:07 crc kubenswrapper[4940]: E1126 07:19:07.577744 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-7dkrp_openstack-operators(df395369-43ff-4cd2-af6e-60a9a96a4d66)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" podUID="df395369-43ff-4cd2-af6e-60a9a96a4d66" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.680471 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.696125 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.708185 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.712745 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.777373 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.805824 4940 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.868684 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-mf8lc" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.868846 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.874886 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 07:19:07 crc kubenswrapper[4940]: I1126 07:19:07.943450 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.020991 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.142857 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.153695 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.226486 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.330658 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.374355 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.386868 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.414421 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.451651 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.454774 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.512623 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.660327 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.715028 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.721401 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.731244 4940 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-nmstate"/"nginx-conf" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.739508 4940 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.812204 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.836819 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.962904 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 07:19:08 crc kubenswrapper[4940]: I1126 07:19:08.989440 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.112473 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.146165 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.179269 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.192000 4940 scope.go:117] "RemoveContainer" containerID="d16757cd44074c00433995bf440fe661edbbee2e301990826beef5a3a36fa129" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.226786 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.241227 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-f7795" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.276383 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.395281 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.484409 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.496899 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.501112 4940 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.508687 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0","openstack/ovn-controller-ovs-k9l7t","openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.508770 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.514140 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.531820 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=24.531796069 podStartE2EDuration="24.531796069s" podCreationTimestamp="2025-11-26 07:18:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:19:09.524031733 +0000 UTC m=+1451.044173372" watchObservedRunningTime="2025-11-26 07:19:09.531796069 +0000 UTC m=+1451.051937708" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.569808 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-28dz5" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.639568 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.737488 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" event={"ID":"b805b33b-94ee-4037-907b-339573471ddb","Type":"ContainerStarted","Data":"6f272b0420b51031537ce7bcf714bdbf95359d0143df66f082de317942edaa6d"} Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.738661 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.764140 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.780859 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.865630 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.913916 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.915953 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-wf8qf" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.934785 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.975427 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 07:19:09 crc kubenswrapper[4940]: I1126 07:19:09.995988 4940 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-fr28m" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.004251 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.091861 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.132704 4940 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.137483 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.220636 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.238495 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.289523 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-7rfvc" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.356704 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.395514 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.417850 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.446874 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.468623 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-qjlnv" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.506761 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-kqd9k" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.508452 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.741193 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.772600 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.914316 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-9wqr8" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.945625 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.956128 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 07:19:10 crc kubenswrapper[4940]: I1126 07:19:10.961514 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:10.999973 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.093861 4940 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.176822 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" path="/var/lib/kubelet/pods/1ae63b19-f186-430b-87f0-d058d2efa83c/volumes" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.180138 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" path="/var/lib/kubelet/pods/5f26eaaa-63b0-491d-b664-56edff3be80c/volumes" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.356827 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.381819 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.383768 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.475447 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.477202 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.484173 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.674166 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.817835 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.849361 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-6r6h6" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.891031 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.965639 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 07:19:11 crc kubenswrapper[4940]: I1126 07:19:11.984032 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 07:19:12 crc kubenswrapper[4940]: I1126 07:19:12.060135 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 07:19:12 crc kubenswrapper[4940]: I1126 07:19:12.209916 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 07:19:12 crc kubenswrapper[4940]: I1126 07:19:12.333829 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 26 07:19:12 crc kubenswrapper[4940]: I1126 07:19:12.884375 4940 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 07:19:13 crc kubenswrapper[4940]: I1126 07:19:13.166661 4940 scope.go:117] "RemoveContainer" containerID="3982a13d0a7f3946864114dbacc34bd5febaa788dc98baf94f62d8fdd739c0f3" Nov 26 07:19:13 crc kubenswrapper[4940]: I1126 07:19:13.787274 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" event={"ID":"4415b953-7e66-4d84-acde-32474c6d0ebf","Type":"ContainerStarted","Data":"64d029d7c4e5012f782b0b6020633b9040e78fe272e7a1841736a6f43c2ea275"} Nov 26 07:19:13 crc kubenswrapper[4940]: I1126 07:19:13.788203 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:19:14 crc kubenswrapper[4940]: I1126 07:19:14.906570 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-d75td" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.011986 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-97dsx" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.197738 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-hxbtm" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.224650 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-6qpkm" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.250373 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-55vwf" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.277772 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-s2cvl" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.309498 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-xbbg2" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.328936 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-qsrc5" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.343672 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-qwlpn" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.357628 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-lzctw" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.372605 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-x9ffm" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.404681 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jxxwj" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.429152 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-2ddr6" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.445683 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nwpjs" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.471049 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-jgp88" Nov 26 07:19:15 crc kubenswrapper[4940]: I1126 07:19:15.679014 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vcr8t" Nov 26 07:19:16 crc kubenswrapper[4940]: I1126 07:19:16.165580 4940 scope.go:117] "RemoveContainer" containerID="c197f7b26e412ffceddb13315c7001ac431ad74018789b7199e6b9622a752663" Nov 26 07:19:16 crc kubenswrapper[4940]: I1126 07:19:16.819341 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" event={"ID":"15d11cf9-51e8-4f1e-880e-86d9bba60224","Type":"ContainerStarted","Data":"f9bb7ca6e83687a448f5ab883c6af027785360633e075ad36a834c90f3931afb"} Nov 26 07:19:16 crc kubenswrapper[4940]: I1126 07:19:16.820075 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:19:16 crc kubenswrapper[4940]: I1126 07:19:16.836485 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-txjbc" Nov 26 07:19:17 crc kubenswrapper[4940]: I1126 07:19:17.166114 4940 scope.go:117] "RemoveContainer" containerID="eb2c0e24126f4a93298401a8ddd378e90fedcb87cd4e47890243297f16ed4f9a" Nov 26 07:19:17 crc kubenswrapper[4940]: I1126 07:19:17.576972 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:19:17 crc kubenswrapper[4940]: I1126 07:19:17.579024 4940 scope.go:117] "RemoveContainer" containerID="cdec025dbea529fed26334e8de3a917e571633b422993ee659d8489bf630449f" Nov 26 07:19:17 crc kubenswrapper[4940]: E1126 07:19:17.579588 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-7dkrp_openstack-operators(df395369-43ff-4cd2-af6e-60a9a96a4d66)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" podUID="df395369-43ff-4cd2-af6e-60a9a96a4d66" Nov 26 07:19:17 crc kubenswrapper[4940]: I1126 07:19:17.834721 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" event={"ID":"0eca9bcc-5909-48e2-927e-b059359977d5","Type":"ContainerStarted","Data":"94a1595d11f6baf2f6891e4cc87e987a1b91c673b90e76ef2f390c68c46c0807"} Nov 26 07:19:17 crc kubenswrapper[4940]: I1126 07:19:17.835445 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:19:19 crc kubenswrapper[4940]: I1126 07:19:19.757652 4940 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 07:19:19 crc kubenswrapper[4940]: I1126 
07:19:19.758246 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://c1ab9a87d5d2ccfa26b711c62908b242ca9a913f805f38c90d64487038dfd7b5" gracePeriod=5 Nov 26 07:19:24 crc kubenswrapper[4940]: I1126 07:19:24.906797 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 07:19:24 crc kubenswrapper[4940]: I1126 07:19:24.907361 4940 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="c1ab9a87d5d2ccfa26b711c62908b242ca9a913f805f38c90d64487038dfd7b5" exitCode=137 Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.352875 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.352937 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.381656 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-ht6zr" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.388583 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-5p8hf" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447316 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447386 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447429 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447503 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447519 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447577 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447601 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447615 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447716 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447891 4940 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447913 4940 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447925 4940 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.447936 4940 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.460503 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.549525 4940 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.920498 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.920617 4940 scope.go:117] "RemoveContainer" containerID="c1ab9a87d5d2ccfa26b711c62908b242ca9a913f805f38c90d64487038dfd7b5" Nov 26 07:19:25 crc kubenswrapper[4940]: I1126 07:19:25.920675 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 07:19:27 crc kubenswrapper[4940]: I1126 07:19:27.180836 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 26 07:19:30 crc kubenswrapper[4940]: I1126 07:19:30.222033 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.590028 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fc97g"] Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.591414 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-server" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.591525 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-server" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.591621 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.591702 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.591801 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.591893 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.591990 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-reaper" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.592124 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-reaper" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.592221 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server-init" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.592413 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server-init" 
Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.592502 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" containerName="installer" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.592579 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" containerName="installer" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.592671 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.592747 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.592822 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-server" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.592898 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-server" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.592983 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.593134 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.593241 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.593327 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.593401 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-expirer" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.593480 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-expirer" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.593557 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.593624 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.593711 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-updater" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.593791 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-updater" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.593876 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.593956 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 
07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.594055 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-server" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.594143 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-server" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.594219 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.594292 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.594375 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="swift-recon-cron" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.594451 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="swift-recon-cron" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.594535 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="rsync" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.594609 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="rsync" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.594696 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.594769 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: E1126 07:19:31.594854 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-updater" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.594927 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-updater" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.595300 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-server" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.595437 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.595549 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.595654 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="swift-recon-cron" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.595755 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.595858 4940 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-updater" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.595968 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-updater" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596100 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596216 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="rsync" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596325 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596431 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="66d37527-e535-45de-9f92-0f95d9f7a856" containerName="installer" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596538 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-reaper" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596648 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-expirer" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596757 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovs-vswitchd" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596861 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f26eaaa-63b0-491d-b664-56edff3be80c" containerName="ovsdb-server" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.596967 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-replicator" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.597139 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="object-server" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.597256 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="container-auditor" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.597371 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ae63b19-f186-430b-87f0-d058d2efa83c" containerName="account-server" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.598786 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.601485 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fc97g"] Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.737207 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-utilities\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.737563 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ml7nn\" (UniqueName: \"kubernetes.io/projected/bab18c4f-0062-48f3-b320-3e503c35d5b2-kube-api-access-ml7nn\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.737769 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-catalog-content\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.839296 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-catalog-content\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.839362 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-utilities\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.839438 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ml7nn\" (UniqueName: \"kubernetes.io/projected/bab18c4f-0062-48f3-b320-3e503c35d5b2-kube-api-access-ml7nn\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.839933 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-catalog-content\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.840189 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-utilities\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.861607 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ml7nn\" (UniqueName: \"kubernetes.io/projected/bab18c4f-0062-48f3-b320-3e503c35d5b2-kube-api-access-ml7nn\") pod \"certified-operators-fc97g\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") " pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:31 crc kubenswrapper[4940]: I1126 07:19:31.958998 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:32 crc kubenswrapper[4940]: I1126 07:19:32.023701 4940 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-77wsc" Nov 26 07:19:32 crc kubenswrapper[4940]: I1126 07:19:32.165871 4940 scope.go:117] "RemoveContainer" containerID="cdec025dbea529fed26334e8de3a917e571633b422993ee659d8489bf630449f" Nov 26 07:19:32 crc kubenswrapper[4940]: I1126 07:19:32.379511 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fc97g"] Nov 26 07:19:33 crc kubenswrapper[4940]: I1126 07:19:33.003305 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" event={"ID":"df395369-43ff-4cd2-af6e-60a9a96a4d66","Type":"ContainerStarted","Data":"94d7f656f8a015a3da6463a768634e57676d741da8e4176f7338d71b45acdf08"} Nov 26 07:19:33 crc kubenswrapper[4940]: I1126 07:19:33.003569 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:19:33 crc kubenswrapper[4940]: I1126 07:19:33.005265 4940 generic.go:334] "Generic (PLEG): container finished" podID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerID="a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e" exitCode=0 Nov 26 07:19:33 crc kubenswrapper[4940]: I1126 07:19:33.005322 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc97g" event={"ID":"bab18c4f-0062-48f3-b320-3e503c35d5b2","Type":"ContainerDied","Data":"a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e"} Nov 26 07:19:33 crc kubenswrapper[4940]: I1126 07:19:33.005351 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc97g" event={"ID":"bab18c4f-0062-48f3-b320-3e503c35d5b2","Type":"ContainerStarted","Data":"7dbcb11df99e47df550725d5874b4903cad211feb79773a1adf6d5ee9d12e9c2"} Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.182401 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dbwh9"] Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.184788 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.192027 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbwh9"] Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.271715 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-utilities\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.271826 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jknzc\" (UniqueName: \"kubernetes.io/projected/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-kube-api-access-jknzc\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.271859 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-catalog-content\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.372690 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l2gwh"] Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.373366 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jknzc\" (UniqueName: \"kubernetes.io/projected/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-kube-api-access-jknzc\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.373423 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-catalog-content\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.373526 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-utilities\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.374033 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-utilities\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.374166 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.374284 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-catalog-content\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.386231 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l2gwh"] Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.404627 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jknzc\" (UniqueName: \"kubernetes.io/projected/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-kube-api-access-jknzc\") pod \"redhat-marketplace-dbwh9\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.475403 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-catalog-content\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.475703 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-utilities\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.475806 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbkkr\" (UniqueName: \"kubernetes.io/projected/0282d0f2-66d7-40e4-96ed-364f44b4b372-kube-api-access-gbkkr\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.502388 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.577209 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-utilities\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.577275 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbkkr\" (UniqueName: \"kubernetes.io/projected/0282d0f2-66d7-40e4-96ed-364f44b4b372-kube-api-access-gbkkr\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.577319 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-catalog-content\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.577868 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-utilities\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.577905 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-catalog-content\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.605843 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbkkr\" (UniqueName: \"kubernetes.io/projected/0282d0f2-66d7-40e4-96ed-364f44b4b372-kube-api-access-gbkkr\") pod \"redhat-operators-l2gwh\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.700407 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:34 crc kubenswrapper[4940]: I1126 07:19:34.976262 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbwh9"] Nov 26 07:19:34 crc kubenswrapper[4940]: W1126 07:19:34.978157 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod199e1cfb_f5d2_4889_ba11_6ae0596f3dff.slice/crio-d0b58269ecdf5b8d0a29473db70732fa6e163f6d4aa678f49b93ad4359fdb8ea WatchSource:0}: Error finding container d0b58269ecdf5b8d0a29473db70732fa6e163f6d4aa678f49b93ad4359fdb8ea: Status 404 returned error can't find the container with id d0b58269ecdf5b8d0a29473db70732fa6e163f6d4aa678f49b93ad4359fdb8ea Nov 26 07:19:35 crc kubenswrapper[4940]: I1126 07:19:35.026635 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbwh9" event={"ID":"199e1cfb-f5d2-4889-ba11-6ae0596f3dff","Type":"ContainerStarted","Data":"d0b58269ecdf5b8d0a29473db70732fa6e163f6d4aa678f49b93ad4359fdb8ea"} Nov 26 07:19:35 crc kubenswrapper[4940]: I1126 07:19:35.142982 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l2gwh"] Nov 26 07:19:35 crc kubenswrapper[4940]: W1126 07:19:35.146267 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0282d0f2_66d7_40e4_96ed_364f44b4b372.slice/crio-b42985e3b2ab8ca88f1191ffd6d04bdf0e9539a3762121ce60c5a5bcccdc4af4 WatchSource:0}: Error finding container b42985e3b2ab8ca88f1191ffd6d04bdf0e9539a3762121ce60c5a5bcccdc4af4: Status 404 returned error can't find the container with id b42985e3b2ab8ca88f1191ffd6d04bdf0e9539a3762121ce60c5a5bcccdc4af4 Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.037300 4940 generic.go:334] "Generic (PLEG): container finished" podID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerID="c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e" exitCode=0 Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.037370 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc97g" event={"ID":"bab18c4f-0062-48f3-b320-3e503c35d5b2","Type":"ContainerDied","Data":"c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e"} Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.039636 4940 generic.go:334] "Generic (PLEG): container finished" podID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerID="864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f" exitCode=0 Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.039680 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbwh9" event={"ID":"199e1cfb-f5d2-4889-ba11-6ae0596f3dff","Type":"ContainerDied","Data":"864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f"} Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.043028 4940 generic.go:334] "Generic (PLEG): container finished" podID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerID="2a64b8015158c7053b5ca75f915f3e47c1d033fca0961c5449b634d0c85bfad9" exitCode=0 Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.043076 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2gwh" event={"ID":"0282d0f2-66d7-40e4-96ed-364f44b4b372","Type":"ContainerDied","Data":"2a64b8015158c7053b5ca75f915f3e47c1d033fca0961c5449b634d0c85bfad9"} Nov 26 
07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.043100 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2gwh" event={"ID":"0282d0f2-66d7-40e4-96ed-364f44b4b372","Type":"ContainerStarted","Data":"b42985e3b2ab8ca88f1191ffd6d04bdf0e9539a3762121ce60c5a5bcccdc4af4"} Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.983481 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wsh9l"] Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.985153 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:36 crc kubenswrapper[4940]: I1126 07:19:36.992343 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wsh9l"] Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.053148 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2gwh" event={"ID":"0282d0f2-66d7-40e4-96ed-364f44b4b372","Type":"ContainerStarted","Data":"c39e90d5d20a72175594a7c79c1a9e865cc7995e36cd07f166dee18bfe13139c"} Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.055498 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc97g" event={"ID":"bab18c4f-0062-48f3-b320-3e503c35d5b2","Type":"ContainerStarted","Data":"e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9"} Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.058128 4940 generic.go:334] "Generic (PLEG): container finished" podID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerID="ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700" exitCode=0 Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.058165 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbwh9" event={"ID":"199e1cfb-f5d2-4889-ba11-6ae0596f3dff","Type":"ContainerDied","Data":"ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700"} Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.096249 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fc97g" podStartSLOduration=2.613389599 podStartE2EDuration="6.096224271s" podCreationTimestamp="2025-11-26 07:19:31 +0000 UTC" firstStartedPulling="2025-11-26 07:19:33.006943266 +0000 UTC m=+1474.527084885" lastFinishedPulling="2025-11-26 07:19:36.489777938 +0000 UTC m=+1478.009919557" observedRunningTime="2025-11-26 07:19:37.092468022 +0000 UTC m=+1478.612609641" watchObservedRunningTime="2025-11-26 07:19:37.096224271 +0000 UTC m=+1478.616365910" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.141504 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cm8q\" (UniqueName: \"kubernetes.io/projected/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-kube-api-access-8cm8q\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.141576 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-utilities\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 
crc kubenswrapper[4940]: I1126 07:19:37.141666 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-catalog-content\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.242724 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-catalog-content\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.242837 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cm8q\" (UniqueName: \"kubernetes.io/projected/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-kube-api-access-8cm8q\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.242888 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-utilities\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.243551 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-catalog-content\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.243795 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-utilities\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.274213 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cm8q\" (UniqueName: \"kubernetes.io/projected/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-kube-api-access-8cm8q\") pod \"certified-operators-wsh9l\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") " pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.333425 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.587122 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-7dkrp" Nov 26 07:19:37 crc kubenswrapper[4940]: I1126 07:19:37.769622 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wsh9l"] Nov 26 07:19:37 crc kubenswrapper[4940]: W1126 07:19:37.775998 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1ee6f9b2_a68f_4ff0_bc6c_48f12275e797.slice/crio-c117eabac4d8806f0df3151fbbd638c2401149dec47159b2585c798a8d6f1e5e WatchSource:0}: Error finding container c117eabac4d8806f0df3151fbbd638c2401149dec47159b2585c798a8d6f1e5e: Status 404 returned error can't find the container with id c117eabac4d8806f0df3151fbbd638c2401149dec47159b2585c798a8d6f1e5e Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.070803 4940 generic.go:334] "Generic (PLEG): container finished" podID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerID="c39e90d5d20a72175594a7c79c1a9e865cc7995e36cd07f166dee18bfe13139c" exitCode=0 Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.070887 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2gwh" event={"ID":"0282d0f2-66d7-40e4-96ed-364f44b4b372","Type":"ContainerDied","Data":"c39e90d5d20a72175594a7c79c1a9e865cc7995e36cd07f166dee18bfe13139c"} Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.073572 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbwh9" event={"ID":"199e1cfb-f5d2-4889-ba11-6ae0596f3dff","Type":"ContainerStarted","Data":"ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e"} Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.075350 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerID="d02857e47d963cfda61f48de40c15ac3a6804d565e87dfc3dfb6f8345605a482" exitCode=0 Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.075420 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wsh9l" event={"ID":"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797","Type":"ContainerDied","Data":"d02857e47d963cfda61f48de40c15ac3a6804d565e87dfc3dfb6f8345605a482"} Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.075456 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wsh9l" event={"ID":"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797","Type":"ContainerStarted","Data":"c117eabac4d8806f0df3151fbbd638c2401149dec47159b2585c798a8d6f1e5e"} Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.135891 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dbwh9" podStartSLOduration=2.680188116 podStartE2EDuration="4.135875515s" podCreationTimestamp="2025-11-26 07:19:34 +0000 UTC" firstStartedPulling="2025-11-26 07:19:36.042204372 +0000 UTC m=+1477.562346011" lastFinishedPulling="2025-11-26 07:19:37.497891791 +0000 UTC m=+1479.018033410" observedRunningTime="2025-11-26 07:19:38.132462467 +0000 UTC m=+1479.652604106" watchObservedRunningTime="2025-11-26 07:19:38.135875515 +0000 UTC m=+1479.656017134" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.573697 4940 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-clc2j"] Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.575397 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.621713 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clc2j"] Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.662108 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx9nl\" (UniqueName: \"kubernetes.io/projected/7e29fd36-80e8-4803-a438-6563640d769d-kube-api-access-bx9nl\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.662149 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-catalog-content\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.662231 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-utilities\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.763995 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx9nl\" (UniqueName: \"kubernetes.io/projected/7e29fd36-80e8-4803-a438-6563640d769d-kube-api-access-bx9nl\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.764363 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-catalog-content\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.764463 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-utilities\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.764922 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-utilities\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.764983 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-catalog-content\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") 
" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.786641 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bx9nl\" (UniqueName: \"kubernetes.io/projected/7e29fd36-80e8-4803-a438-6563640d769d-kube-api-access-bx9nl\") pod \"redhat-marketplace-clc2j\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:38 crc kubenswrapper[4940]: I1126 07:19:38.949136 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.107547 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wsh9l" event={"ID":"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797","Type":"ContainerStarted","Data":"fba8b17b6f1c32b3e9e139b2c0b5cee95f680958fb2d593d6a049073c7081b01"} Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.131660 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2gwh" event={"ID":"0282d0f2-66d7-40e4-96ed-364f44b4b372","Type":"ContainerStarted","Data":"90615a282478c166895f4253b050436ec2d818b99f026886839941e78dbbd47d"} Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.171762 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l2gwh" podStartSLOduration=2.680220237 podStartE2EDuration="5.171743459s" podCreationTimestamp="2025-11-26 07:19:34 +0000 UTC" firstStartedPulling="2025-11-26 07:19:36.046136266 +0000 UTC m=+1477.566277885" lastFinishedPulling="2025-11-26 07:19:38.537659488 +0000 UTC m=+1480.057801107" observedRunningTime="2025-11-26 07:19:39.170282852 +0000 UTC m=+1480.690424471" watchObservedRunningTime="2025-11-26 07:19:39.171743459 +0000 UTC m=+1480.691885078" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.464702 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clc2j"] Nov 26 07:19:39 crc kubenswrapper[4940]: W1126 07:19:39.467671 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e29fd36_80e8_4803_a438_6563640d769d.slice/crio-2ab0b11c46095107e056f486752f4b9dca3211ff06bd62e4c666efc2e95b17ff WatchSource:0}: Error finding container 2ab0b11c46095107e056f486752f4b9dca3211ff06bd62e4c666efc2e95b17ff: Status 404 returned error can't find the container with id 2ab0b11c46095107e056f486752f4b9dca3211ff06bd62e4c666efc2e95b17ff Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.576714 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ff2fz"] Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.578603 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.597067 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ff2fz"] Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.680322 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-catalog-content\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.680868 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dnbc\" (UniqueName: \"kubernetes.io/projected/03b477be-0073-4390-b87a-acdf508074ee-kube-api-access-4dnbc\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.680962 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-utilities\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.782298 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-catalog-content\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.782389 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dnbc\" (UniqueName: \"kubernetes.io/projected/03b477be-0073-4390-b87a-acdf508074ee-kube-api-access-4dnbc\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.782425 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-utilities\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.782865 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-utilities\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.783136 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-catalog-content\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.813743 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4dnbc\" (UniqueName: \"kubernetes.io/projected/03b477be-0073-4390-b87a-acdf508074ee-kube-api-access-4dnbc\") pod \"redhat-operators-ff2fz\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") " pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:39 crc kubenswrapper[4940]: I1126 07:19:39.894322 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.139565 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerID="fba8b17b6f1c32b3e9e139b2c0b5cee95f680958fb2d593d6a049073c7081b01" exitCode=0 Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.139629 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wsh9l" event={"ID":"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797","Type":"ContainerDied","Data":"fba8b17b6f1c32b3e9e139b2c0b5cee95f680958fb2d593d6a049073c7081b01"} Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.146765 4940 generic.go:334] "Generic (PLEG): container finished" podID="7e29fd36-80e8-4803-a438-6563640d769d" containerID="6a7efed5aa3d50cf1f1fa4d0eb24f7616ee7573ca72f94e47cdd645871c010ab" exitCode=0 Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.148336 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clc2j" event={"ID":"7e29fd36-80e8-4803-a438-6563640d769d","Type":"ContainerDied","Data":"6a7efed5aa3d50cf1f1fa4d0eb24f7616ee7573ca72f94e47cdd645871c010ab"} Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.148369 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clc2j" event={"ID":"7e29fd36-80e8-4803-a438-6563640d769d","Type":"ContainerStarted","Data":"2ab0b11c46095107e056f486752f4b9dca3211ff06bd62e4c666efc2e95b17ff"} Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.328144 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ff2fz"] Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.978130 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x9njh"] Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.981623 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:40 crc kubenswrapper[4940]: I1126 07:19:40.987951 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x9njh"] Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.105476 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-catalog-content\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.105587 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjk77\" (UniqueName: \"kubernetes.io/projected/1743d256-8adb-4a95-a1d3-ec29d932191f-kube-api-access-sjk77\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.105645 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-utilities\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.160722 4940 generic.go:334] "Generic (PLEG): container finished" podID="7e29fd36-80e8-4803-a438-6563640d769d" containerID="d2b035c28fe954cbeac2f5dcbcdc37fe18b75aaef1457f34ebfb76b1becbd2b4" exitCode=0 Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.162166 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clc2j" event={"ID":"7e29fd36-80e8-4803-a438-6563640d769d","Type":"ContainerDied","Data":"d2b035c28fe954cbeac2f5dcbcdc37fe18b75aaef1457f34ebfb76b1becbd2b4"} Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.165279 4940 generic.go:334] "Generic (PLEG): container finished" podID="03b477be-0073-4390-b87a-acdf508074ee" containerID="abd64615531456f98ecfadbbd379a5f9887773d96dabb6f2ad129fb2e5febd23" exitCode=0 Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.190423 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ff2fz" event={"ID":"03b477be-0073-4390-b87a-acdf508074ee","Type":"ContainerDied","Data":"abd64615531456f98ecfadbbd379a5f9887773d96dabb6f2ad129fb2e5febd23"} Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.190464 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ff2fz" event={"ID":"03b477be-0073-4390-b87a-acdf508074ee","Type":"ContainerStarted","Data":"706818166abd90aab80ea4a1761664df250f75305be8c7198c5cad8f0990840e"} Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.190473 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wsh9l" event={"ID":"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797","Type":"ContainerStarted","Data":"0e2f214b56a4574e98fcb309e09713a98d62f7a8f14acf0f28bde2d659fe7c0e"} Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.206788 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjk77\" (UniqueName: 
\"kubernetes.io/projected/1743d256-8adb-4a95-a1d3-ec29d932191f-kube-api-access-sjk77\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.206860 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-utilities\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.206942 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-catalog-content\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.207487 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-utilities\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.207676 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-catalog-content\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.238589 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjk77\" (UniqueName: \"kubernetes.io/projected/1743d256-8adb-4a95-a1d3-ec29d932191f-kube-api-access-sjk77\") pod \"certified-operators-x9njh\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") " pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.240650 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wsh9l" podStartSLOduration=2.781609526 podStartE2EDuration="5.240636335s" podCreationTimestamp="2025-11-26 07:19:36 +0000 UTC" firstStartedPulling="2025-11-26 07:19:38.076908885 +0000 UTC m=+1479.597050504" lastFinishedPulling="2025-11-26 07:19:40.535935694 +0000 UTC m=+1482.056077313" observedRunningTime="2025-11-26 07:19:41.23825255 +0000 UTC m=+1482.758394169" watchObservedRunningTime="2025-11-26 07:19:41.240636335 +0000 UTC m=+1482.760777954" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.305302 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.853549 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x9njh"] Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.962186 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:41 crc kubenswrapper[4940]: I1126 07:19:41.962230 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.068956 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.182406 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9njh" event={"ID":"1743d256-8adb-4a95-a1d3-ec29d932191f","Type":"ContainerStarted","Data":"e3236911323995ddc6b5c3e70920f6c7439aab9fdc17e9ac7d19f1e9ac374e23"} Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.182457 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9njh" event={"ID":"1743d256-8adb-4a95-a1d3-ec29d932191f","Type":"ContainerStarted","Data":"e290ec54c3660004e8e177a7832f3dbbd8010f0e5f5f03818b585efe6e5c262c"} Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.184512 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-85cvv"] Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.210833 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clc2j" event={"ID":"7e29fd36-80e8-4803-a438-6563640d769d","Type":"ContainerStarted","Data":"b261de9b496f56339ed26c834c5dfd9e79095cea67e27556569938928e8cb1df"} Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.211223 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ff2fz" event={"ID":"03b477be-0073-4390-b87a-acdf508074ee","Type":"ContainerStarted","Data":"d557c202fba44003b92a7e1f4a0e38539d5c7d532189c663f086068155b91ffd"} Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.211005 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.216411 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-85cvv"] Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.290220 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fc97g" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.291947 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-clc2j" podStartSLOduration=2.847963991 podStartE2EDuration="4.291934698s" podCreationTimestamp="2025-11-26 07:19:38 +0000 UTC" firstStartedPulling="2025-11-26 07:19:40.153212276 +0000 UTC m=+1481.673353895" lastFinishedPulling="2025-11-26 07:19:41.597182983 +0000 UTC m=+1483.117324602" observedRunningTime="2025-11-26 07:19:42.290461632 +0000 UTC m=+1483.810603251" watchObservedRunningTime="2025-11-26 07:19:42.291934698 +0000 UTC m=+1483.812076317" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.328805 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-catalog-content\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.328939 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-utilities\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.329119 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9prf\" (UniqueName: \"kubernetes.io/projected/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-kube-api-access-r9prf\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.430061 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-utilities\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.430189 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9prf\" (UniqueName: \"kubernetes.io/projected/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-kube-api-access-r9prf\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.430244 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-catalog-content\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 
07:19:42.430524 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-utilities\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.430736 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-catalog-content\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.452019 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9prf\" (UniqueName: \"kubernetes.io/projected/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-kube-api-access-r9prf\") pod \"redhat-marketplace-85cvv\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") " pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.555005 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:42 crc kubenswrapper[4940]: I1126 07:19:42.859922 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-85cvv"] Nov 26 07:19:42 crc kubenswrapper[4940]: W1126 07:19:42.862554 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1bb9974_4ef0_4c60_8aa4_5834cd1cda88.slice/crio-604c18786bb1f74f3fd3d58a8a4d2f490e5bf16fa7c6d5631ec27337ef22deeb WatchSource:0}: Error finding container 604c18786bb1f74f3fd3d58a8a4d2f490e5bf16fa7c6d5631ec27337ef22deeb: Status 404 returned error can't find the container with id 604c18786bb1f74f3fd3d58a8a4d2f490e5bf16fa7c6d5631ec27337ef22deeb Nov 26 07:19:43 crc kubenswrapper[4940]: I1126 07:19:43.143152 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-nw2st" Nov 26 07:19:43 crc kubenswrapper[4940]: I1126 07:19:43.211760 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85cvv" event={"ID":"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88","Type":"ContainerStarted","Data":"d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9"} Nov 26 07:19:43 crc kubenswrapper[4940]: I1126 07:19:43.211806 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85cvv" event={"ID":"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88","Type":"ContainerStarted","Data":"604c18786bb1f74f3fd3d58a8a4d2f490e5bf16fa7c6d5631ec27337ef22deeb"} Nov 26 07:19:43 crc kubenswrapper[4940]: I1126 07:19:43.219652 4940 generic.go:334] "Generic (PLEG): container finished" podID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerID="e3236911323995ddc6b5c3e70920f6c7439aab9fdc17e9ac7d19f1e9ac374e23" exitCode=0 Nov 26 07:19:43 crc kubenswrapper[4940]: I1126 07:19:43.220019 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9njh" event={"ID":"1743d256-8adb-4a95-a1d3-ec29d932191f","Type":"ContainerDied","Data":"e3236911323995ddc6b5c3e70920f6c7439aab9fdc17e9ac7d19f1e9ac374e23"} Nov 26 07:19:44 crc kubenswrapper[4940]: I1126 07:19:44.230466 4940 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/certified-operators-x9njh" event={"ID":"1743d256-8adb-4a95-a1d3-ec29d932191f","Type":"ContainerStarted","Data":"195facecc86ce7c93440984f09b505bd3f7ecc04da195f45398f7c635f485a87"} Nov 26 07:19:44 crc kubenswrapper[4940]: I1126 07:19:44.503006 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:44 crc kubenswrapper[4940]: I1126 07:19:44.503234 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:44 crc kubenswrapper[4940]: I1126 07:19:44.553270 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:44 crc kubenswrapper[4940]: I1126 07:19:44.700896 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:44 crc kubenswrapper[4940]: I1126 07:19:44.700950 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:44 crc kubenswrapper[4940]: I1126 07:19:44.744471 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:45 crc kubenswrapper[4940]: I1126 07:19:45.245291 4940 generic.go:334] "Generic (PLEG): container finished" podID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerID="195facecc86ce7c93440984f09b505bd3f7ecc04da195f45398f7c635f485a87" exitCode=0 Nov 26 07:19:45 crc kubenswrapper[4940]: I1126 07:19:45.245353 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9njh" event={"ID":"1743d256-8adb-4a95-a1d3-ec29d932191f","Type":"ContainerDied","Data":"195facecc86ce7c93440984f09b505bd3f7ecc04da195f45398f7c635f485a87"} Nov 26 07:19:45 crc kubenswrapper[4940]: I1126 07:19:45.248711 4940 generic.go:334] "Generic (PLEG): container finished" podID="03b477be-0073-4390-b87a-acdf508074ee" containerID="d557c202fba44003b92a7e1f4a0e38539d5c7d532189c663f086068155b91ffd" exitCode=0 Nov 26 07:19:45 crc kubenswrapper[4940]: I1126 07:19:45.248830 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ff2fz" event={"ID":"03b477be-0073-4390-b87a-acdf508074ee","Type":"ContainerDied","Data":"d557c202fba44003b92a7e1f4a0e38539d5c7d532189c663f086068155b91ffd"} Nov 26 07:19:45 crc kubenswrapper[4940]: I1126 07:19:45.253011 4940 generic.go:334] "Generic (PLEG): container finished" podID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerID="d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9" exitCode=0 Nov 26 07:19:45 crc kubenswrapper[4940]: I1126 07:19:45.253067 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85cvv" event={"ID":"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88","Type":"ContainerDied","Data":"d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9"} Nov 26 07:19:45 crc kubenswrapper[4940]: I1126 07:19:45.310743 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:19:45 crc kubenswrapper[4940]: I1126 07:19:45.317970 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.177609 4940 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/redhat-operators-8k8ql"] Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.179812 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.194827 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8k8ql"] Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.262902 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85cvv" event={"ID":"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88","Type":"ContainerStarted","Data":"cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646"} Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.264775 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9njh" event={"ID":"1743d256-8adb-4a95-a1d3-ec29d932191f","Type":"ContainerStarted","Data":"cb5f4ae9fa71b84665b254972ddd625f3fbd6feb149491959522a8083b949140"} Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.291401 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ff2fz" event={"ID":"03b477be-0073-4390-b87a-acdf508074ee","Type":"ContainerStarted","Data":"a6ca4f624cacbb77ccc37f7837088108df3ba41ad1b65b85e1546f0e058805e5"} Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.292589 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-catalog-content\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.292648 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc8l2\" (UniqueName: \"kubernetes.io/projected/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-kube-api-access-nc8l2\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.292714 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-utilities\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.319015 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ff2fz" podStartSLOduration=2.815495656 podStartE2EDuration="7.31900156s" podCreationTimestamp="2025-11-26 07:19:39 +0000 UTC" firstStartedPulling="2025-11-26 07:19:41.172248806 +0000 UTC m=+1482.692390425" lastFinishedPulling="2025-11-26 07:19:45.67575471 +0000 UTC m=+1487.195896329" observedRunningTime="2025-11-26 07:19:46.31553302 +0000 UTC m=+1487.835674639" watchObservedRunningTime="2025-11-26 07:19:46.31900156 +0000 UTC m=+1487.839143179" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.334472 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x9njh" podStartSLOduration=2.852541528 podStartE2EDuration="6.33445738s" podCreationTimestamp="2025-11-26 07:19:40 
+0000 UTC" firstStartedPulling="2025-11-26 07:19:42.183990895 +0000 UTC m=+1483.704132514" lastFinishedPulling="2025-11-26 07:19:45.665906747 +0000 UTC m=+1487.186048366" observedRunningTime="2025-11-26 07:19:46.331945731 +0000 UTC m=+1487.852087370" watchObservedRunningTime="2025-11-26 07:19:46.33445738 +0000 UTC m=+1487.854598989" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.397734 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-utilities\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.397818 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-catalog-content\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.397883 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc8l2\" (UniqueName: \"kubernetes.io/projected/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-kube-api-access-nc8l2\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.398581 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-catalog-content\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.398822 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-utilities\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.418887 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc8l2\" (UniqueName: \"kubernetes.io/projected/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-kube-api-access-nc8l2\") pod \"redhat-operators-8k8ql\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") " pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:46 crc kubenswrapper[4940]: I1126 07:19:46.538330 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.017648 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8k8ql"] Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.180144 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-92mvv"] Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.182258 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.189759 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-92mvv"] Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.307457 4940 generic.go:334] "Generic (PLEG): container finished" podID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerID="cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646" exitCode=0 Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.307557 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85cvv" event={"ID":"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88","Type":"ContainerDied","Data":"cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646"} Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.315247 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-utilities\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.315316 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-catalog-content\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.316001 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9q62\" (UniqueName: \"kubernetes.io/projected/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-kube-api-access-p9q62\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.318993 4940 generic.go:334] "Generic (PLEG): container finished" podID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerID="62795aee6275762280c8a0f5558e233b643d59357ca1c6d87df7b15875956069" exitCode=0 Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.319099 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8k8ql" event={"ID":"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7","Type":"ContainerDied","Data":"62795aee6275762280c8a0f5558e233b643d59357ca1c6d87df7b15875956069"} Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.319128 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8k8ql" event={"ID":"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7","Type":"ContainerStarted","Data":"b499aae95657b4f8df445760cd2d14e1290be04dbaef61e68358578c675de4e8"} Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.334284 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.335106 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.417051 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-utilities\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.417147 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-catalog-content\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.417179 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9q62\" (UniqueName: \"kubernetes.io/projected/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-kube-api-access-p9q62\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.417959 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-catalog-content\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.418263 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-utilities\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.429927 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.450185 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9q62\" (UniqueName: \"kubernetes.io/projected/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-kube-api-access-p9q62\") pod \"certified-operators-92mvv\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.504144 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:19:47 crc kubenswrapper[4940]: I1126 07:19:47.960021 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-92mvv"] Nov 26 07:19:47 crc kubenswrapper[4940]: W1126 07:19:47.966196 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9bcd6c0e_2ac6_4114_a0d2_6d63155d06a4.slice/crio-448f4d428ba8487d7616598fb8078dea3e75a054e5b56dc7420b4398d91bfbc3 WatchSource:0}: Error finding container 448f4d428ba8487d7616598fb8078dea3e75a054e5b56dc7420b4398d91bfbc3: Status 404 returned error can't find the container with id 448f4d428ba8487d7616598fb8078dea3e75a054e5b56dc7420b4398d91bfbc3 Nov 26 07:19:48 crc kubenswrapper[4940]: I1126 07:19:48.332735 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92mvv" event={"ID":"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4","Type":"ContainerStarted","Data":"917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0"} Nov 26 07:19:48 crc kubenswrapper[4940]: I1126 07:19:48.332785 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92mvv" event={"ID":"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4","Type":"ContainerStarted","Data":"448f4d428ba8487d7616598fb8078dea3e75a054e5b56dc7420b4398d91bfbc3"} Nov 26 07:19:48 crc kubenswrapper[4940]: I1126 07:19:48.337110 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85cvv" event={"ID":"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88","Type":"ContainerStarted","Data":"ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859"} Nov 26 07:19:48 crc kubenswrapper[4940]: I1126 07:19:48.396625 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-85cvv" podStartSLOduration=3.65966933 podStartE2EDuration="6.396603544s" podCreationTimestamp="2025-11-26 07:19:42 +0000 UTC" firstStartedPulling="2025-11-26 07:19:45.254444447 +0000 UTC m=+1486.774586066" lastFinishedPulling="2025-11-26 07:19:47.991378661 +0000 UTC m=+1489.511520280" observedRunningTime="2025-11-26 07:19:48.393357032 +0000 UTC m=+1489.913498651" watchObservedRunningTime="2025-11-26 07:19:48.396603544 +0000 UTC m=+1489.916745163" Nov 26 07:19:48 crc kubenswrapper[4940]: I1126 07:19:48.409391 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wsh9l" Nov 26 07:19:48 crc kubenswrapper[4940]: I1126 07:19:48.949811 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:48 crc kubenswrapper[4940]: I1126 07:19:48.950148 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:48 crc kubenswrapper[4940]: I1126 07:19:48.995677 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:49 crc kubenswrapper[4940]: I1126 07:19:49.349995 4940 generic.go:334] "Generic (PLEG): container finished" podID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerID="917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0" exitCode=0 Nov 26 07:19:49 crc kubenswrapper[4940]: I1126 07:19:49.350103 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-92mvv" event={"ID":"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4","Type":"ContainerDied","Data":"917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0"} Nov 26 07:19:49 crc kubenswrapper[4940]: I1126 07:19:49.352859 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8k8ql" event={"ID":"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7","Type":"ContainerStarted","Data":"2ceb1c4ae25de0eb323af0bbb137361f5cb18503d8beead31ecccc44e49beb53"} Nov 26 07:19:49 crc kubenswrapper[4940]: I1126 07:19:49.410472 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:19:49 crc kubenswrapper[4940]: I1126 07:19:49.894699 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:49 crc kubenswrapper[4940]: I1126 07:19:49.894747 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:19:49 crc kubenswrapper[4940]: I1126 07:19:49.963847 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6f8d6cc986-tlmk2" Nov 26 07:19:50 crc kubenswrapper[4940]: I1126 07:19:50.364750 4940 generic.go:334] "Generic (PLEG): container finished" podID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerID="2ceb1c4ae25de0eb323af0bbb137361f5cb18503d8beead31ecccc44e49beb53" exitCode=0 Nov 26 07:19:50 crc kubenswrapper[4940]: I1126 07:19:50.365230 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8k8ql" event={"ID":"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7","Type":"ContainerDied","Data":"2ceb1c4ae25de0eb323af0bbb137361f5cb18503d8beead31ecccc44e49beb53"} Nov 26 07:19:50 crc kubenswrapper[4940]: I1126 07:19:50.944332 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ff2fz" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="registry-server" probeResult="failure" output=< Nov 26 07:19:50 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 07:19:50 crc kubenswrapper[4940]: > Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.305996 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.306047 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.352372 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.375009 4940 generic.go:334] "Generic (PLEG): container finished" podID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerID="2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2" exitCode=0 Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.375091 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92mvv" event={"ID":"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4","Type":"ContainerDied","Data":"2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2"} Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.377756 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-8k8ql" event={"ID":"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7","Type":"ContainerStarted","Data":"185e2151996492234e9c312215a76f565d36f30a09480a2a4b1ebca474569d65"} Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.384500 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5p6nh"] Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.386626 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.399742 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5p6nh"] Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.437340 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x9njh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.439538 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8k8ql" podStartSLOduration=1.966557791 podStartE2EDuration="5.439518894s" podCreationTimestamp="2025-11-26 07:19:46 +0000 UTC" firstStartedPulling="2025-11-26 07:19:47.320174443 +0000 UTC m=+1488.840316072" lastFinishedPulling="2025-11-26 07:19:50.793135546 +0000 UTC m=+1492.313277175" observedRunningTime="2025-11-26 07:19:51.434307379 +0000 UTC m=+1492.954448998" watchObservedRunningTime="2025-11-26 07:19:51.439518894 +0000 UTC m=+1492.959660513" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.579072 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-catalog-content\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.579125 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-utilities\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.579172 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dk64\" (UniqueName: \"kubernetes.io/projected/647ebab8-d2f6-4cd8-ae64-9a822c756453-kube-api-access-4dk64\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.681195 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-catalog-content\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.681255 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-utilities\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " 
pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.681308 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dk64\" (UniqueName: \"kubernetes.io/projected/647ebab8-d2f6-4cd8-ae64-9a822c756453-kube-api-access-4dk64\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.681831 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-catalog-content\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.681897 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-utilities\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.706026 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dk64\" (UniqueName: \"kubernetes.io/projected/647ebab8-d2f6-4cd8-ae64-9a822c756453-kube-api-access-4dk64\") pod \"redhat-marketplace-5p6nh\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:51 crc kubenswrapper[4940]: I1126 07:19:51.709909 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.179027 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5rdpw"] Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.181064 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.192693 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5rdpw"] Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.229391 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5p6nh"] Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.289453 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-catalog-content\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.292122 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqwt4\" (UniqueName: \"kubernetes.io/projected/61dabfc2-b66f-4c50-b217-7c58f2fd4725-kube-api-access-kqwt4\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.292234 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-utilities\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.394107 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-catalog-content\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.394548 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-catalog-content\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.395416 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqwt4\" (UniqueName: \"kubernetes.io/projected/61dabfc2-b66f-4c50-b217-7c58f2fd4725-kube-api-access-kqwt4\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.395596 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-utilities\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.395707 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92mvv" event={"ID":"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4","Type":"ContainerStarted","Data":"b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691"} Nov 26 07:19:52 crc 
kubenswrapper[4940]: I1126 07:19:52.396157 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-utilities\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.400365 4940 generic.go:334] "Generic (PLEG): container finished" podID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerID="0b4a38b88019c22d3505210d695c193d7f48242be69824926e6c048ceef474db" exitCode=0 Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.401736 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5p6nh" event={"ID":"647ebab8-d2f6-4cd8-ae64-9a822c756453","Type":"ContainerDied","Data":"0b4a38b88019c22d3505210d695c193d7f48242be69824926e6c048ceef474db"} Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.401775 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5p6nh" event={"ID":"647ebab8-d2f6-4cd8-ae64-9a822c756453","Type":"ContainerStarted","Data":"967bd9eadfe7a2b1ab6baa521075371ad6481a362907be7e5721d3ce55d7e732"} Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.424315 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqwt4\" (UniqueName: \"kubernetes.io/projected/61dabfc2-b66f-4c50-b217-7c58f2fd4725-kube-api-access-kqwt4\") pod \"redhat-operators-5rdpw\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") " pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.425257 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-92mvv" podStartSLOduration=2.958823529 podStartE2EDuration="5.425237733s" podCreationTimestamp="2025-11-26 07:19:47 +0000 UTC" firstStartedPulling="2025-11-26 07:19:49.352220249 +0000 UTC m=+1490.872361888" lastFinishedPulling="2025-11-26 07:19:51.818634473 +0000 UTC m=+1493.338776092" observedRunningTime="2025-11-26 07:19:52.422153845 +0000 UTC m=+1493.942295464" watchObservedRunningTime="2025-11-26 07:19:52.425237733 +0000 UTC m=+1493.945379352" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.495396 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5rdpw" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.555196 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.555272 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:52 crc kubenswrapper[4940]: I1126 07:19:52.632362 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:53 crc kubenswrapper[4940]: I1126 07:19:53.016676 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5rdpw"] Nov 26 07:19:53 crc kubenswrapper[4940]: I1126 07:19:53.409737 4940 generic.go:334] "Generic (PLEG): container finished" podID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerID="8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134" exitCode=0 Nov 26 07:19:53 crc kubenswrapper[4940]: I1126 07:19:53.409887 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rdpw" event={"ID":"61dabfc2-b66f-4c50-b217-7c58f2fd4725","Type":"ContainerDied","Data":"8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134"} Nov 26 07:19:53 crc kubenswrapper[4940]: I1126 07:19:53.410129 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rdpw" event={"ID":"61dabfc2-b66f-4c50-b217-7c58f2fd4725","Type":"ContainerStarted","Data":"46b62722647437ddb9f87e660acae9213317ffd734adbd1a650986237f31a394"} Nov 26 07:19:53 crc kubenswrapper[4940]: I1126 07:19:53.414841 4940 generic.go:334] "Generic (PLEG): container finished" podID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerID="b4be4a2c51d29d4bdda835eb0bd45ac46d005ede37dc9ec4623836d77abeca48" exitCode=0 Nov 26 07:19:53 crc kubenswrapper[4940]: I1126 07:19:53.414909 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5p6nh" event={"ID":"647ebab8-d2f6-4cd8-ae64-9a822c756453","Type":"ContainerDied","Data":"b4be4a2c51d29d4bdda835eb0bd45ac46d005ede37dc9ec4623836d77abeca48"} Nov 26 07:19:53 crc kubenswrapper[4940]: I1126 07:19:53.472948 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-85cvv" Nov 26 07:19:54 crc kubenswrapper[4940]: I1126 07:19:54.426092 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5p6nh" event={"ID":"647ebab8-d2f6-4cd8-ae64-9a822c756453","Type":"ContainerStarted","Data":"550e970c6334a62d6813bdfcfbfbc0201455fb5ed8768607616cf3d321542e01"} Nov 26 07:19:54 crc kubenswrapper[4940]: I1126 07:19:54.429218 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rdpw" event={"ID":"61dabfc2-b66f-4c50-b217-7c58f2fd4725","Type":"ContainerStarted","Data":"53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509"} Nov 26 07:19:54 crc kubenswrapper[4940]: I1126 07:19:54.448319 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5p6nh" podStartSLOduration=1.961726533 podStartE2EDuration="3.448300554s" podCreationTimestamp="2025-11-26 07:19:51 +0000 UTC" firstStartedPulling="2025-11-26 07:19:52.402877955 +0000 UTC m=+1493.923019564" lastFinishedPulling="2025-11-26 
07:19:53.889451966 +0000 UTC m=+1495.409593585" observedRunningTime="2025-11-26 07:19:54.446105655 +0000 UTC m=+1495.966247284" watchObservedRunningTime="2025-11-26 07:19:54.448300554 +0000 UTC m=+1495.968442173" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.388515 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w7c4d"] Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.391133 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.407140 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w7c4d"] Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.458659 4940 generic.go:334] "Generic (PLEG): container finished" podID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerID="53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509" exitCode=0 Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.458718 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rdpw" event={"ID":"61dabfc2-b66f-4c50-b217-7c58f2fd4725","Type":"ContainerDied","Data":"53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509"} Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.538909 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.539450 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.553486 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6b6zb\" (UniqueName: \"kubernetes.io/projected/907d2835-1aee-4b5a-9726-a75946007030-kube-api-access-6b6zb\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.553569 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-utilities\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.553594 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-catalog-content\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.589746 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.655278 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6b6zb\" (UniqueName: \"kubernetes.io/projected/907d2835-1aee-4b5a-9726-a75946007030-kube-api-access-6b6zb\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 
07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.655366 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-utilities\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.655389 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-catalog-content\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.655911 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-catalog-content\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.656067 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-utilities\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.677020 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6b6zb\" (UniqueName: \"kubernetes.io/projected/907d2835-1aee-4b5a-9726-a75946007030-kube-api-access-6b6zb\") pod \"certified-operators-w7c4d\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:56 crc kubenswrapper[4940]: I1126 07:19:56.760749 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.181385 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w7c4d"] Nov 26 07:19:57 crc kubenswrapper[4940]: W1126 07:19:57.182350 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod907d2835_1aee_4b5a_9726_a75946007030.slice/crio-2e502d94231d66fc93f79568084694b5ce03be2bdc03742bfcecd3f385e65b72 WatchSource:0}: Error finding container 2e502d94231d66fc93f79568084694b5ce03be2bdc03742bfcecd3f385e65b72: Status 404 returned error can't find the container with id 2e502d94231d66fc93f79568084694b5ce03be2bdc03742bfcecd3f385e65b72 Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.384861 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-flxv9"] Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.386349 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.390941 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-flxv9"]
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.469145 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rdpw" event={"ID":"61dabfc2-b66f-4c50-b217-7c58f2fd4725","Type":"ContainerStarted","Data":"57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d"}
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.479492 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8l62\" (UniqueName: \"kubernetes.io/projected/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-kube-api-access-g8l62\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.479691 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-catalog-content\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.479867 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-utilities\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.480080 4940 generic.go:334] "Generic (PLEG): container finished" podID="907d2835-1aee-4b5a-9726-a75946007030" containerID="99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472" exitCode=0
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.480296 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7c4d" event={"ID":"907d2835-1aee-4b5a-9726-a75946007030","Type":"ContainerDied","Data":"99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472"}
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.480332 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7c4d" event={"ID":"907d2835-1aee-4b5a-9726-a75946007030","Type":"ContainerStarted","Data":"2e502d94231d66fc93f79568084694b5ce03be2bdc03742bfcecd3f385e65b72"}
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.502005 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5rdpw" podStartSLOduration=2.01596924 podStartE2EDuration="5.501987135s" podCreationTimestamp="2025-11-26 07:19:52 +0000 UTC" firstStartedPulling="2025-11-26 07:19:53.411592931 +0000 UTC m=+1494.931734550" lastFinishedPulling="2025-11-26 07:19:56.897610826 +0000 UTC m=+1498.417752445" observedRunningTime="2025-11-26 07:19:57.496831532 +0000 UTC m=+1499.016973151" watchObservedRunningTime="2025-11-26 07:19:57.501987135 +0000 UTC m=+1499.022128754"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.504987 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-92mvv"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.505027 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-92mvv"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.538866 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8k8ql"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.552792 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-92mvv"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.580918 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-catalog-content\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.581005 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-utilities\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.581027 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8l62\" (UniqueName: \"kubernetes.io/projected/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-kube-api-access-g8l62\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.582326 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-utilities\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.582380 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-catalog-content\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.605014 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8l62\" (UniqueName: \"kubernetes.io/projected/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-kube-api-access-g8l62\") pod \"redhat-marketplace-flxv9\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:57 crc kubenswrapper[4940]: I1126 07:19:57.710250 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:19:58 crc kubenswrapper[4940]: I1126 07:19:58.207297 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-flxv9"]
Nov 26 07:19:58 crc kubenswrapper[4940]: I1126 07:19:58.489878 4940 generic.go:334] "Generic (PLEG): container finished" podID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerID="411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf" exitCode=0
Nov 26 07:19:58 crc kubenswrapper[4940]: I1126 07:19:58.489921 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-flxv9" event={"ID":"a5c11eb7-a10a-486d-a692-2f3196c2cdbd","Type":"ContainerDied","Data":"411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf"}
Nov 26 07:19:58 crc kubenswrapper[4940]: I1126 07:19:58.490343 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-flxv9" event={"ID":"a5c11eb7-a10a-486d-a692-2f3196c2cdbd","Type":"ContainerStarted","Data":"fb0f91ebc879f67e557532ad02e73d0744d9db82742d932c8872cefe569944d7"}
Nov 26 07:19:58 crc kubenswrapper[4940]: I1126 07:19:58.492264 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7c4d" event={"ID":"907d2835-1aee-4b5a-9726-a75946007030","Type":"ContainerStarted","Data":"7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a"}
Nov 26 07:19:58 crc kubenswrapper[4940]: I1126 07:19:58.553984 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-92mvv"
Nov 26 07:19:59 crc kubenswrapper[4940]: I1126 07:19:59.502971 4940 generic.go:334] "Generic (PLEG): container finished" podID="907d2835-1aee-4b5a-9726-a75946007030" containerID="7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a" exitCode=0
Nov 26 07:19:59 crc kubenswrapper[4940]: I1126 07:19:59.503015 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7c4d" event={"ID":"907d2835-1aee-4b5a-9726-a75946007030","Type":"ContainerDied","Data":"7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a"}
Nov 26 07:19:59 crc kubenswrapper[4940]: I1126 07:19:59.506698 4940 generic.go:334] "Generic (PLEG): container finished" podID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerID="bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82" exitCode=0
Nov 26 07:19:59 crc kubenswrapper[4940]: I1126 07:19:59.506791 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-flxv9" event={"ID":"a5c11eb7-a10a-486d-a692-2f3196c2cdbd","Type":"ContainerDied","Data":"bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82"}
Nov 26 07:19:59 crc kubenswrapper[4940]: I1126 07:19:59.946752 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ff2fz"
Nov 26 07:20:00 crc kubenswrapper[4940]: I1126 07:20:00.030661 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ff2fz"
Nov 26 07:20:00 crc kubenswrapper[4940]: I1126 07:20:00.529989 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-flxv9" event={"ID":"a5c11eb7-a10a-486d-a692-2f3196c2cdbd","Type":"ContainerStarted","Data":"ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da"}
Nov 26 07:20:00 crc kubenswrapper[4940]: I1126 07:20:00.535080 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7c4d" event={"ID":"907d2835-1aee-4b5a-9726-a75946007030","Type":"ContainerStarted","Data":"4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243"}
Nov 26 07:20:00 crc kubenswrapper[4940]: I1126 07:20:00.551733 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-flxv9" podStartSLOduration=1.877188472 podStartE2EDuration="3.551702771s" podCreationTimestamp="2025-11-26 07:19:57 +0000 UTC" firstStartedPulling="2025-11-26 07:19:58.491889457 +0000 UTC m=+1500.012031076" lastFinishedPulling="2025-11-26 07:20:00.166403746 +0000 UTC m=+1501.686545375" observedRunningTime="2025-11-26 07:20:00.549243553 +0000 UTC m=+1502.069385172" watchObservedRunningTime="2025-11-26 07:20:00.551702771 +0000 UTC m=+1502.071844390"
Nov 26 07:20:00 crc kubenswrapper[4940]: I1126 07:20:00.568790 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w7c4d" podStartSLOduration=1.95613751 podStartE2EDuration="4.568771841s" podCreationTimestamp="2025-11-26 07:19:56 +0000 UTC" firstStartedPulling="2025-11-26 07:19:57.48253936 +0000 UTC m=+1499.002680989" lastFinishedPulling="2025-11-26 07:20:00.095173701 +0000 UTC m=+1501.615315320" observedRunningTime="2025-11-26 07:20:00.567418578 +0000 UTC m=+1502.087560197" watchObservedRunningTime="2025-11-26 07:20:00.568771841 +0000 UTC m=+1502.088913460"
Nov 26 07:20:00 crc kubenswrapper[4940]: I1126 07:20:00.979206 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ltrj6"]
Nov 26 07:20:00 crc kubenswrapper[4940]: I1126 07:20:00.980915 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:00 crc kubenswrapper[4940]: I1126 07:20:00.990479 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ltrj6"]
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.098019 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-catalog-content\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.098254 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-utilities\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.098383 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vmlv\" (UniqueName: \"kubernetes.io/projected/ebba4d6c-39d4-481b-acbf-92b6dab82439-kube-api-access-5vmlv\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.199917 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-utilities\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.200014 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vmlv\" (UniqueName: \"kubernetes.io/projected/ebba4d6c-39d4-481b-acbf-92b6dab82439-kube-api-access-5vmlv\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.200125 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-catalog-content\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.200659 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-catalog-content\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.200737 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-utilities\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.225702 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vmlv\" (UniqueName: \"kubernetes.io/projected/ebba4d6c-39d4-481b-acbf-92b6dab82439-kube-api-access-5vmlv\") pod \"redhat-operators-ltrj6\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.296788 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ltrj6"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.712394 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5p6nh"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.712674 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5p6nh"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.758353 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5p6nh"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.787708 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g2sqf"]
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.792718 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.806975 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g2sqf"]
Nov 26 07:20:01 crc kubenswrapper[4940]: W1126 07:20:01.851669 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebba4d6c_39d4_481b_acbf_92b6dab82439.slice/crio-f28564199b522356232fa69c0b397d068692d1507a79c70b43f924cf4ef62e2b WatchSource:0}: Error finding container f28564199b522356232fa69c0b397d068692d1507a79c70b43f924cf4ef62e2b: Status 404 returned error can't find the container with id f28564199b522356232fa69c0b397d068692d1507a79c70b43f924cf4ef62e2b
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.858661 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ltrj6"]
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.917576 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbsv6\" (UniqueName: \"kubernetes.io/projected/3c5158dd-e8a1-48fa-a742-32c73dafd95b-kube-api-access-dbsv6\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.917664 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-catalog-content\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:01 crc kubenswrapper[4940]: I1126 07:20:01.917712 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-utilities\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.019063 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbsv6\" (UniqueName: \"kubernetes.io/projected/3c5158dd-e8a1-48fa-a742-32c73dafd95b-kube-api-access-dbsv6\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.019142 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-catalog-content\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.019175 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-utilities\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.019633 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-utilities\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.019840 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-catalog-content\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.050204 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbsv6\" (UniqueName: \"kubernetes.io/projected/3c5158dd-e8a1-48fa-a742-32c73dafd95b-kube-api-access-dbsv6\") pod \"certified-operators-g2sqf\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") " pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.111097 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.496191 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5rdpw"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.496621 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5rdpw"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.553963 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltrj6" event={"ID":"ebba4d6c-39d4-481b-acbf-92b6dab82439","Type":"ContainerStarted","Data":"f28564199b522356232fa69c0b397d068692d1507a79c70b43f924cf4ef62e2b"}
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.574608 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5rdpw"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.613488 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g2sqf"]
Nov 26 07:20:02 crc kubenswrapper[4940]: W1126 07:20:02.619753 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c5158dd_e8a1_48fa_a742_32c73dafd95b.slice/crio-1be4b75a524534064ec6eba547d04d1bc1bd0c611222ad01925d32ab990a9d09 WatchSource:0}: Error finding container 1be4b75a524534064ec6eba547d04d1bc1bd0c611222ad01925d32ab990a9d09: Status 404 returned error can't find the container with id 1be4b75a524534064ec6eba547d04d1bc1bd0c611222ad01925d32ab990a9d09
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.637866 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5p6nh"
Nov 26 07:20:02 crc kubenswrapper[4940]: I1126 07:20:02.642665 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5rdpw"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.568106 4940 generic.go:334] "Generic (PLEG): container finished" podID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerID="f018787d2db1a2000b5b7ec2e40095408e768e5fed776e8bada55c6604b5b7de" exitCode=0
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.568379 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltrj6" event={"ID":"ebba4d6c-39d4-481b-acbf-92b6dab82439","Type":"ContainerDied","Data":"f018787d2db1a2000b5b7ec2e40095408e768e5fed776e8bada55c6604b5b7de"}
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.573920 4940 generic.go:334] "Generic (PLEG): container finished" podID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerID="0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8" exitCode=0
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.574767 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g2sqf" event={"ID":"3c5158dd-e8a1-48fa-a742-32c73dafd95b","Type":"ContainerDied","Data":"0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8"}
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.574848 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g2sqf" event={"ID":"3c5158dd-e8a1-48fa-a742-32c73dafd95b","Type":"ContainerStarted","Data":"1be4b75a524534064ec6eba547d04d1bc1bd0c611222ad01925d32ab990a9d09"}
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.578070 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-974pq"]
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.581073 4940 scope.go:117] "RemoveContainer" containerID="28e2a242740bd54e7e70c7584d5427f94678a06152e93baf68d358346b444d34"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.590328 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.605089 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-974pq"]
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.627799 4940 scope.go:117] "RemoveContainer" containerID="ad46d95e4b31516777f6595e28850d4fd7fe0cf1601ab7e6e62e24919711b1e3"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.645432 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-catalog-content\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.645524 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rqlb\" (UniqueName: \"kubernetes.io/projected/6506e24e-d48f-4bd2-96b7-d32eb555dc79-kube-api-access-2rqlb\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.645552 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-utilities\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.660148 4940 scope.go:117] "RemoveContainer" containerID="544aae1a724f7ff43db122d3db2ab2e7c44082064a7f75e5a466aae86843c5f8"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.697105 4940 scope.go:117] "RemoveContainer" containerID="b2079e20c2f7eb5ab7bfd36841e069cbf0408264fccf23ef33aac1c97d13b0b7"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.746815 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rqlb\" (UniqueName: \"kubernetes.io/projected/6506e24e-d48f-4bd2-96b7-d32eb555dc79-kube-api-access-2rqlb\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.746867 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-utilities\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.746958 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-catalog-content\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.747529 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-utilities\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.747600 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-catalog-content\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.764297 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rqlb\" (UniqueName: \"kubernetes.io/projected/6506e24e-d48f-4bd2-96b7-d32eb555dc79-kube-api-access-2rqlb\") pod \"redhat-marketplace-974pq\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") " pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.837690 4940 scope.go:117] "RemoveContainer" containerID="b6e397d9a1c3b9e439eaee34e7f6ab2d5cca1c4d5672b6ac2c308c39f31ba662"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.894926 4940 scope.go:117] "RemoveContainer" containerID="0fa107bb0b676759b848d5b7a9c66cd2a36b722337627f11a3312667c0397d04"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.943300 4940 scope.go:117] "RemoveContainer" containerID="314e4c503b3e92e3d40e785cc0e21ddd070fa647cdcb8a831a94293baea43f92"
Nov 26 07:20:03 crc kubenswrapper[4940]: I1126 07:20:03.994993 4940 scope.go:117] "RemoveContainer" containerID="db3f2b385b9b5f08606b07783338d477465859be0fc661086c63825760b6c395"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.011731 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.035544 4940 scope.go:117] "RemoveContainer" containerID="06eb10118d6f67ed17dc4673b5c0fe39fb295a586f54890566fc42f673a7a34f"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.078910 4940 scope.go:117] "RemoveContainer" containerID="ac0e35b82f07eb7f9962a21908841a4ad2dc0d4537b49b3d218f50d447c56cc2"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.120741 4940 scope.go:117] "RemoveContainer" containerID="ed816dacf55ec0bc4c721ede36d0a5e84c165a7151e932b5a7869aa986b4649d"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.149118 4940 scope.go:117] "RemoveContainer" containerID="d2e2b5615ecd67dfc4f4c70f5e276eed17c63b8bfbb7e591c55bc593760db45c"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.184274 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dzxrb"]
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.189193 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.195678 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dzxrb"]
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.255546 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-utilities\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.255716 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xd5bc\" (UniqueName: \"kubernetes.io/projected/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-kube-api-access-xd5bc\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.255806 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-catalog-content\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.313796 4940 scope.go:117] "RemoveContainer" containerID="af39f43206ed9e8c3359edcad03f5b9e9c03901dffc2f65deec2edf61fc17246"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.345936 4940 scope.go:117] "RemoveContainer" containerID="b94c506e6086ba8b91c87d459cec6ced6c7d24b11d96185c34ecdfc3ae960f0b"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.356971 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-utilities\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.357082 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xd5bc\" (UniqueName: \"kubernetes.io/projected/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-kube-api-access-xd5bc\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.357140 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-catalog-content\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.357772 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-catalog-content\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.357786 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-utilities\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.375679 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xd5bc\" (UniqueName: \"kubernetes.io/projected/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-kube-api-access-xd5bc\") pod \"redhat-operators-dzxrb\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.389150 4940 scope.go:117] "RemoveContainer" containerID="bca968db600092021a60c505e8480f04c4ab1d2e4deffaa260a8603ed550dbac"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.409369 4940 scope.go:117] "RemoveContainer" containerID="a219095e440f0eedb95f44ff7b1916f2f087302b42b8e8e4c92e06a7d851bbeb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.450295 4940 scope.go:117] "RemoveContainer" containerID="5d90b332961fb439346d759e5988a9dafd760f766c76d53447f03e456e68a27f"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.512934 4940 scope.go:117] "RemoveContainer" containerID="6b020022938f10660afac714fd60b211b8b08756c8097b70ff711d6b7d8686d0"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.525946 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dzxrb"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.568190 4940 scope.go:117] "RemoveContainer" containerID="5e79fd5689911f61381e13cf74ad508b7c002ca284841769b93df7d781c78f0b"
Nov 26 07:20:04 crc kubenswrapper[4940]: I1126 07:20:04.573354 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-974pq"]
Nov 26 07:20:04 crc kubenswrapper[4940]: W1126 07:20:04.574473 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6506e24e_d48f_4bd2_96b7_d32eb555dc79.slice/crio-06bd9286d8d8a2d541669fd6d22963b019d093effce574f34dca6f0740c6766f WatchSource:0}: Error finding container 06bd9286d8d8a2d541669fd6d22963b019d093effce574f34dca6f0740c6766f: Status 404 returned error can't find the container with id 06bd9286d8d8a2d541669fd6d22963b019d093effce574f34dca6f0740c6766f
Nov 26 07:20:05 crc kubenswrapper[4940]: I1126 07:20:05.100274 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dzxrb"]
Nov 26 07:20:05 crc kubenswrapper[4940]: I1126 07:20:05.663348 4940 generic.go:334] "Generic (PLEG): container finished" podID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerID="2962b97c54e9693b5efd9036acfd1f4930ff2c80e9f10c3aeba8a34fdbbaaf3a" exitCode=0
Nov 26 07:20:05 crc kubenswrapper[4940]: I1126 07:20:05.663838 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974pq" event={"ID":"6506e24e-d48f-4bd2-96b7-d32eb555dc79","Type":"ContainerDied","Data":"2962b97c54e9693b5efd9036acfd1f4930ff2c80e9f10c3aeba8a34fdbbaaf3a"}
Nov 26 07:20:05 crc kubenswrapper[4940]: I1126 07:20:05.663876 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974pq" event={"ID":"6506e24e-d48f-4bd2-96b7-d32eb555dc79","Type":"ContainerStarted","Data":"06bd9286d8d8a2d541669fd6d22963b019d093effce574f34dca6f0740c6766f"}
Nov 26 07:20:05 crc kubenswrapper[4940]: I1126 07:20:05.674914 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzxrb" event={"ID":"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91","Type":"ContainerStarted","Data":"dc60c9e48df89c83ddefde98c9a9a37ee9101f7d225977ccda712d67da363b0a"}
Nov 26 07:20:05 crc kubenswrapper[4940]: I1126 07:20:05.977150 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9b495"]
Nov 26 07:20:05 crc kubenswrapper[4940]: I1126 07:20:05.981004 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:05 crc kubenswrapper[4940]: I1126 07:20:05.992862 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9b495"]
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.100504 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-utilities\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.100551 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4d6l\" (UniqueName: \"kubernetes.io/projected/d9a480fc-1e8a-4f40-8add-a75b75641f4e-kube-api-access-p4d6l\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.100779 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-catalog-content\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.202283 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-utilities\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.202338 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4d6l\" (UniqueName: \"kubernetes.io/projected/d9a480fc-1e8a-4f40-8add-a75b75641f4e-kube-api-access-p4d6l\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.202383 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-catalog-content\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.204263 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-catalog-content\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.204317 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-utilities\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.226421 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4d6l\" (UniqueName: \"kubernetes.io/projected/d9a480fc-1e8a-4f40-8add-a75b75641f4e-kube-api-access-p4d6l\") pod \"certified-operators-9b495\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") " pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.367223 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.578824 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bb2nd"]
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.582833 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.593281 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb2nd"]
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.686524 4940 generic.go:334] "Generic (PLEG): container finished" podID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerID="8d2d4ca96287deb3ef1ff551058e5550dde9dbdb9e6d7b04c6f35d349c51a618" exitCode=0
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.686588 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltrj6" event={"ID":"ebba4d6c-39d4-481b-acbf-92b6dab82439","Type":"ContainerDied","Data":"8d2d4ca96287deb3ef1ff551058e5550dde9dbdb9e6d7b04c6f35d349c51a618"}
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.689460 4940 generic.go:334] "Generic (PLEG): container finished" podID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerID="d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2" exitCode=0
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.689518 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzxrb" event={"ID":"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91","Type":"ContainerDied","Data":"d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2"}
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.692243 4940 generic.go:334] "Generic (PLEG): container finished" podID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerID="6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9" exitCode=0
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.692688 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g2sqf" event={"ID":"3c5158dd-e8a1-48fa-a742-32c73dafd95b","Type":"ContainerDied","Data":"6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9"}
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.695416 4940 generic.go:334] "Generic (PLEG): container finished" podID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerID="9d4ba8e428173cf5d2ffd87de437b788ab95dd285984ca179c5ffb25df5c4d24" exitCode=0
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.695447 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974pq" event={"ID":"6506e24e-d48f-4bd2-96b7-d32eb555dc79","Type":"ContainerDied","Data":"9d4ba8e428173cf5d2ffd87de437b788ab95dd285984ca179c5ffb25df5c4d24"}
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.709049 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-catalog-content\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.709196 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-utilities\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.709225 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pst7r\" (UniqueName: \"kubernetes.io/projected/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-kube-api-access-pst7r\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.761107 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w7c4d"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.761329 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w7c4d"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.801256 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w7c4d"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.811122 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-utilities\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.811780 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pst7r\" (UniqueName: \"kubernetes.io/projected/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-kube-api-access-pst7r\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.811839 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-catalog-content\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.813135 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-utilities\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.813246 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-catalog-content\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.835464 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pst7r\" (UniqueName: \"kubernetes.io/projected/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-kube-api-access-pst7r\") pod \"redhat-marketplace-bb2nd\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:06 crc kubenswrapper[4940]: W1126 07:20:06.838285 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd9a480fc_1e8a_4f40_8add_a75b75641f4e.slice/crio-274bb19b3e3f055fcea216b3aa9c43890c582ef64c21b073e7d781d18a127332 WatchSource:0}: Error finding container 274bb19b3e3f055fcea216b3aa9c43890c582ef64c21b073e7d781d18a127332: Status 404 returned error can't find the container with id 274bb19b3e3f055fcea216b3aa9c43890c582ef64c21b073e7d781d18a127332
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.842628 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9b495"]
Nov 26 07:20:06 crc kubenswrapper[4940]: I1126 07:20:06.907903 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bb2nd"
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.427749 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb2nd"]
Nov 26 07:20:07 crc kubenswrapper[4940]: W1126 07:20:07.442998 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf53b29f5_a2cb_45a2_84ea_71322b48ea8b.slice/crio-d8bd9da36b0c720bcd881749e96b54b31848c2cc46b72a9b9c487253e0726132 WatchSource:0}: Error finding container d8bd9da36b0c720bcd881749e96b54b31848c2cc46b72a9b9c487253e0726132: Status 404 returned error can't find the container with id d8bd9da36b0c720bcd881749e96b54b31848c2cc46b72a9b9c487253e0726132
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.710458 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.710712 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltrj6" event={"ID":"ebba4d6c-39d4-481b-acbf-92b6dab82439","Type":"ContainerStarted","Data":"83a94c79915de2c5dc795c02d7a2a9667d05534015e70bb454e2d116a23d3d74"}
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.711246 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.715709 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974pq" event={"ID":"6506e24e-d48f-4bd2-96b7-d32eb555dc79","Type":"ContainerStarted","Data":"7bcd52de79ac31cee8828ed5e62930ffbb17fbe8e76eadb32e2ce022cb1b7c89"}
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.718576 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzxrb" event={"ID":"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91","Type":"ContainerStarted","Data":"e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0"}
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.723833 4940 generic.go:334] "Generic (PLEG): container finished" podID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerID="54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f" exitCode=0
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.723899 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb2nd" event={"ID":"f53b29f5-a2cb-45a2-84ea-71322b48ea8b","Type":"ContainerDied","Data":"54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f"}
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.723921 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb2nd" event={"ID":"f53b29f5-a2cb-45a2-84ea-71322b48ea8b","Type":"ContainerStarted","Data":"d8bd9da36b0c720bcd881749e96b54b31848c2cc46b72a9b9c487253e0726132"}
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.732399 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g2sqf" event={"ID":"3c5158dd-e8a1-48fa-a742-32c73dafd95b","Type":"ContainerStarted","Data":"977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14"}
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.738500 4940 generic.go:334] "Generic (PLEG): container finished" podID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerID="c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2" exitCode=0
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.741836 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b495" event={"ID":"d9a480fc-1e8a-4f40-8add-a75b75641f4e","Type":"ContainerDied","Data":"c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2"}
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.745706 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b495" event={"ID":"d9a480fc-1e8a-4f40-8add-a75b75641f4e","Type":"ContainerStarted","Data":"274bb19b3e3f055fcea216b3aa9c43890c582ef64c21b073e7d781d18a127332"}
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.749275 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ltrj6" podStartSLOduration=4.165742987 podStartE2EDuration="7.749254378s" podCreationTimestamp="2025-11-26 07:20:00 +0000 UTC" firstStartedPulling="2025-11-26 07:20:03.578297095 +0000 UTC m=+1505.098438714" lastFinishedPulling="2025-11-26 07:20:07.161808486 +0000 UTC m=+1508.681950105" observedRunningTime="2025-11-26 07:20:07.735628957 +0000 UTC m=+1509.255770566" watchObservedRunningTime="2025-11-26 07:20:07.749254378 +0000 UTC m=+1509.269395997"
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.790516 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.797942 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w7c4d"
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.833605 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g2sqf" podStartSLOduration=3.261922742 podStartE2EDuration="6.833588748s" podCreationTimestamp="2025-11-26 07:20:01 +0000 UTC" firstStartedPulling="2025-11-26 07:20:03.578716388 +0000 UTC m=+1505.098858007" lastFinishedPulling="2025-11-26 07:20:07.150382394 +0000 UTC m=+1508.670524013" observedRunningTime="2025-11-26 07:20:07.830442438 +0000 UTC m=+1509.350584067" watchObservedRunningTime="2025-11-26 07:20:07.833588748 +0000 UTC m=+1509.353730367"
Nov 26 07:20:07 crc kubenswrapper[4940]: I1126 07:20:07.874478 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-974pq" podStartSLOduration=3.436426186 podStartE2EDuration="4.874464141s" podCreationTimestamp="2025-11-26 07:20:03 +0000 UTC" firstStartedPulling="2025-11-26 07:20:05.692598284 +0000 UTC m=+1507.212739903" lastFinishedPulling="2025-11-26 07:20:07.130636239 +0000 UTC m=+1508.650777858" observedRunningTime="2025-11-26 07:20:07.869281567 +0000 UTC m=+1509.389423186" watchObservedRunningTime="2025-11-26 07:20:07.874464141 +0000 UTC m=+1509.394605760"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.380991 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fh5xx"]
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.382788 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.395919 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fh5xx"]
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.447553 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-catalog-content\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.447633 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc4qp\" (UniqueName: \"kubernetes.io/projected/29459127-09a2-47c2-b6ca-3b76342e6e04-kube-api-access-gc4qp\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.447688 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-utilities\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.549388 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-utilities\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.549585 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-catalog-content\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.549765 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc4qp\" (UniqueName: \"kubernetes.io/projected/29459127-09a2-47c2-b6ca-3b76342e6e04-kube-api-access-gc4qp\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.550145 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-catalog-content\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.550170 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-utilities\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.581220 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc4qp\" (UniqueName: \"kubernetes.io/projected/29459127-09a2-47c2-b6ca-3b76342e6e04-kube-api-access-gc4qp\") pod \"redhat-operators-fh5xx\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") " pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.724847 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.755499 4940 generic.go:334] "Generic (PLEG): container finished" podID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerID="e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0" exitCode=0
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.755604 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzxrb" event={"ID":"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91","Type":"ContainerDied","Data":"e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0"}
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.762564 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb2nd" event={"ID":"f53b29f5-a2cb-45a2-84ea-71322b48ea8b","Type":"ContainerStarted","Data":"2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5"}
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.828776 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-flxv9"
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.994858 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x54z7"]
Nov 26 07:20:08 crc kubenswrapper[4940]: I1126 07:20:08.996676 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.010659 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x54z7"]
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.061755 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qrkk\" (UniqueName: \"kubernetes.io/projected/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-kube-api-access-2qrkk\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.061809 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-catalog-content\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.061852 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-utilities\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.163754 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qrkk\" (UniqueName: \"kubernetes.io/projected/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-kube-api-access-2qrkk\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.163807 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-catalog-content\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.163851 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-utilities\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.164438 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-utilities\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.164434 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-catalog-content\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.186859 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qrkk\" (UniqueName: \"kubernetes.io/projected/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-kube-api-access-2qrkk\") pod \"certified-operators-x54z7\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") " pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.278632 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fh5xx"]
Nov 26 07:20:09 crc kubenswrapper[4940]: W1126 07:20:09.295024 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29459127_09a2_47c2_b6ca_3b76342e6e04.slice/crio-1bcd9e2bea9ea492edfbee1bd6c57e7e277d41b232539f96fc1f1d16ca927ebf WatchSource:0}: Error finding container 1bcd9e2bea9ea492edfbee1bd6c57e7e277d41b232539f96fc1f1d16ca927ebf: Status 404 returned error can't find the container with id 1bcd9e2bea9ea492edfbee1bd6c57e7e277d41b232539f96fc1f1d16ca927ebf
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.326206 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.626234 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x54z7"]
Nov 26 07:20:09 crc kubenswrapper[4940]: W1126 07:20:09.633419 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c6b3855_b23e_46a6_abfd_47c2ab2c4b5f.slice/crio-6c0ea66e149af2addaf4dd70a43b9e04ab89693ce8d6d55bd10317f82f1bf92e WatchSource:0}: Error finding container 6c0ea66e149af2addaf4dd70a43b9e04ab89693ce8d6d55bd10317f82f1bf92e: Status 404 returned error can't find the container with id 6c0ea66e149af2addaf4dd70a43b9e04ab89693ce8d6d55bd10317f82f1bf92e
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.771211 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x54z7" event={"ID":"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f","Type":"ContainerStarted","Data":"6c0ea66e149af2addaf4dd70a43b9e04ab89693ce8d6d55bd10317f82f1bf92e"}
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.773968 4940 generic.go:334] "Generic (PLEG): container finished" podID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerID="dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b" exitCode=0
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.774149 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b495" event={"ID":"d9a480fc-1e8a-4f40-8add-a75b75641f4e","Type":"ContainerDied","Data":"dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b"}
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.775761 4940 generic.go:334] "Generic (PLEG): container finished" podID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerID="b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3" exitCode=0
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.775805 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fh5xx" event={"ID":"29459127-09a2-47c2-b6ca-3b76342e6e04","Type":"ContainerDied","Data":"b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3"}
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.775822 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fh5xx" event={"ID":"29459127-09a2-47c2-b6ca-3b76342e6e04","Type":"ContainerStarted","Data":"1bcd9e2bea9ea492edfbee1bd6c57e7e277d41b232539f96fc1f1d16ca927ebf"}
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.780883 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzxrb" event={"ID":"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91","Type":"ContainerStarted","Data":"104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d"}
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.783915 4940 generic.go:334] "Generic (PLEG): container finished" podID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerID="2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5" exitCode=0
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.785179 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb2nd" event={"ID":"f53b29f5-a2cb-45a2-84ea-71322b48ea8b","Type":"ContainerDied","Data":"2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5"}
Nov 26 07:20:09 crc kubenswrapper[4940]: I1126 07:20:09.873009 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dzxrb" podStartSLOduration=3.353325006 podStartE2EDuration="5.872979535s" podCreationTimestamp="2025-11-26 07:20:04 +0000 UTC" firstStartedPulling="2025-11-26 07:20:06.692949465 +0000 UTC m=+1508.213091084" lastFinishedPulling="2025-11-26 07:20:09.212603994 +0000 UTC m=+1510.732745613" observedRunningTime="2025-11-26 07:20:09.865511759 +0000 UTC m=+1511.385653388" watchObservedRunningTime="2025-11-26 07:20:09.872979535 +0000 UTC m=+1511.393121164"
Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.784158 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5q4sh"]
Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.788387 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.803685 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b495" event={"ID":"d9a480fc-1e8a-4f40-8add-a75b75641f4e","Type":"ContainerStarted","Data":"fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a"} Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.809114 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb2nd" event={"ID":"f53b29f5-a2cb-45a2-84ea-71322b48ea8b","Type":"ContainerStarted","Data":"dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08"} Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.811499 4940 generic.go:334] "Generic (PLEG): container finished" podID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerID="b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3" exitCode=0 Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.812093 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x54z7" event={"ID":"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f","Type":"ContainerDied","Data":"b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3"} Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.817938 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5q4sh"] Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.851711 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9b495" podStartSLOduration=3.324856406 podStartE2EDuration="5.851677152s" podCreationTimestamp="2025-11-26 07:20:05 +0000 UTC" firstStartedPulling="2025-11-26 07:20:07.790273056 +0000 UTC m=+1509.310414675" lastFinishedPulling="2025-11-26 07:20:10.317093802 +0000 UTC m=+1511.837235421" observedRunningTime="2025-11-26 07:20:10.844387731 +0000 UTC m=+1512.364529350" watchObservedRunningTime="2025-11-26 07:20:10.851677152 +0000 UTC m=+1512.371818771" Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.870303 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bb2nd" podStartSLOduration=2.296835089 podStartE2EDuration="4.870284071s" podCreationTimestamp="2025-11-26 07:20:06 +0000 UTC" firstStartedPulling="2025-11-26 07:20:07.725442454 +0000 UTC m=+1509.245584073" lastFinishedPulling="2025-11-26 07:20:10.298891436 +0000 UTC m=+1511.819033055" observedRunningTime="2025-11-26 07:20:10.866505171 +0000 UTC m=+1512.386646800" watchObservedRunningTime="2025-11-26 07:20:10.870284071 +0000 UTC m=+1512.390425690" Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.900286 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-utilities\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.900548 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-catalog-content\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " 
pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:10 crc kubenswrapper[4940]: I1126 07:20:10.900694 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb6hb\" (UniqueName: \"kubernetes.io/projected/569e0af8-c086-4193-b1f9-4764c62b0d80-kube-api-access-lb6hb\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.002523 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb6hb\" (UniqueName: \"kubernetes.io/projected/569e0af8-c086-4193-b1f9-4764c62b0d80-kube-api-access-lb6hb\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.002713 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-utilities\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.002744 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-catalog-content\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.003301 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-utilities\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.003342 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-catalog-content\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.034184 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb6hb\" (UniqueName: \"kubernetes.io/projected/569e0af8-c086-4193-b1f9-4764c62b0d80-kube-api-access-lb6hb\") pod \"redhat-marketplace-5q4sh\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") " pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.114356 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.297174 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ltrj6" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.297593 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ltrj6" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.401418 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-frx6v"] Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.403075 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.428082 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-frx6v"] Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.513077 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdx98\" (UniqueName: \"kubernetes.io/projected/dfef8b09-5a70-4b1e-8287-763763cd8b99-kube-api-access-vdx98\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.513178 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-catalog-content\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.513230 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-utilities\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.606943 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5q4sh"] Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.614036 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-utilities\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.614120 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdx98\" (UniqueName: \"kubernetes.io/projected/dfef8b09-5a70-4b1e-8287-763763cd8b99-kube-api-access-vdx98\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.614171 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-catalog-content\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" 
Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.614555 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-catalog-content\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.614766 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-utilities\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.641988 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdx98\" (UniqueName: \"kubernetes.io/projected/dfef8b09-5a70-4b1e-8287-763763cd8b99-kube-api-access-vdx98\") pod \"redhat-operators-frx6v\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.777361 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.822711 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x54z7" event={"ID":"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f","Type":"ContainerStarted","Data":"dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef"} Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.829577 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5q4sh" event={"ID":"569e0af8-c086-4193-b1f9-4764c62b0d80","Type":"ContainerStarted","Data":"697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a"} Nov 26 07:20:11 crc kubenswrapper[4940]: I1126 07:20:11.829617 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5q4sh" event={"ID":"569e0af8-c086-4193-b1f9-4764c62b0d80","Type":"ContainerStarted","Data":"85ce9b8eb5c9c0a5a37d64492d47ed537ac50e9dda59a1203c5bd33aa19ca287"} Nov 26 07:20:12 crc kubenswrapper[4940]: I1126 07:20:12.111609 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g2sqf" Nov 26 07:20:12 crc kubenswrapper[4940]: I1126 07:20:12.111872 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g2sqf" Nov 26 07:20:12 crc kubenswrapper[4940]: I1126 07:20:12.155485 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g2sqf" Nov 26 07:20:12 crc kubenswrapper[4940]: I1126 07:20:12.258390 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-frx6v"] Nov 26 07:20:12 crc kubenswrapper[4940]: I1126 07:20:12.367641 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ltrj6" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="registry-server" probeResult="failure" output=< Nov 26 07:20:12 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 07:20:12 crc kubenswrapper[4940]: > Nov 26 07:20:12 crc kubenswrapper[4940]: I1126 07:20:12.859563 4940 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-frx6v" event={"ID":"dfef8b09-5a70-4b1e-8287-763763cd8b99","Type":"ContainerStarted","Data":"f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc"} Nov 26 07:20:12 crc kubenswrapper[4940]: I1126 07:20:12.859905 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-frx6v" event={"ID":"dfef8b09-5a70-4b1e-8287-763763cd8b99","Type":"ContainerStarted","Data":"16b7fddcba27ec8372fd3b85ae11bb18f25a98998e53a1c862ccbc7a974bc148"} Nov 26 07:20:12 crc kubenswrapper[4940]: I1126 07:20:12.932825 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g2sqf" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.385861 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2rwkk"] Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.387868 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.399376 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2rwkk"] Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.447274 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-catalog-content\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.447434 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6tfb\" (UniqueName: \"kubernetes.io/projected/0eca03fd-c0fb-4900-a1de-1637a499e1cc-kube-api-access-z6tfb\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.447502 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-utilities\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.548687 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-catalog-content\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.548876 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6tfb\" (UniqueName: \"kubernetes.io/projected/0eca03fd-c0fb-4900-a1de-1637a499e1cc-kube-api-access-z6tfb\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.548917 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-utilities\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.549319 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-catalog-content\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.549417 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-utilities\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.580858 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6tfb\" (UniqueName: \"kubernetes.io/projected/0eca03fd-c0fb-4900-a1de-1637a499e1cc-kube-api-access-z6tfb\") pod \"certified-operators-2rwkk\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.722287 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.872065 4940 generic.go:334] "Generic (PLEG): container finished" podID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerID="697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a" exitCode=0 Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.872136 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5q4sh" event={"ID":"569e0af8-c086-4193-b1f9-4764c62b0d80","Type":"ContainerDied","Data":"697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a"} Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.874696 4940 generic.go:334] "Generic (PLEG): container finished" podID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerID="f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc" exitCode=0 Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.874751 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-frx6v" event={"ID":"dfef8b09-5a70-4b1e-8287-763763cd8b99","Type":"ContainerDied","Data":"f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc"} Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.880519 4940 generic.go:334] "Generic (PLEG): container finished" podID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerID="dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef" exitCode=0 Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.881099 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x54z7" event={"ID":"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f","Type":"ContainerDied","Data":"dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef"} Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.980640 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fwmzp"] Nov 26 07:20:13 crc kubenswrapper[4940]: I1126 07:20:13.982930 4940 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.010425 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwmzp"] Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.012591 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-974pq" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.012640 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-974pq" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.059435 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-catalog-content\") pod \"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.059477 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97nq4\" (UniqueName: \"kubernetes.io/projected/333dad53-ee05-4006-a57b-80ee2c090144-kube-api-access-97nq4\") pod \"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.059516 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-utilities\") pod \"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.060739 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-974pq" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.160474 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-catalog-content\") pod \"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.160521 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97nq4\" (UniqueName: \"kubernetes.io/projected/333dad53-ee05-4006-a57b-80ee2c090144-kube-api-access-97nq4\") pod \"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.160561 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-utilities\") pod \"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.161017 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-catalog-content\") pod 
\"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.161120 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-utilities\") pod \"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.190130 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97nq4\" (UniqueName: \"kubernetes.io/projected/333dad53-ee05-4006-a57b-80ee2c090144-kube-api-access-97nq4\") pod \"redhat-marketplace-fwmzp\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") " pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.205070 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2rwkk"] Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.312815 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.526492 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dzxrb" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.526540 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dzxrb" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.907384 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x54z7" event={"ID":"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f","Type":"ContainerStarted","Data":"534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d"} Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.911583 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2rwkk" event={"ID":"0eca03fd-c0fb-4900-a1de-1637a499e1cc","Type":"ContainerStarted","Data":"3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d"} Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.911632 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2rwkk" event={"ID":"0eca03fd-c0fb-4900-a1de-1637a499e1cc","Type":"ContainerStarted","Data":"1591f8451add3a7e282016be9bd58ad1aa1e883af29148efe9d999e6c740dfc8"} Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.933219 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fh5xx" event={"ID":"29459127-09a2-47c2-b6ca-3b76342e6e04","Type":"ContainerStarted","Data":"e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997"} Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.939575 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x54z7" podStartSLOduration=3.446789198 podStartE2EDuration="6.939557857s" podCreationTimestamp="2025-11-26 07:20:08 +0000 UTC" firstStartedPulling="2025-11-26 07:20:10.813988619 +0000 UTC m=+1512.334130238" lastFinishedPulling="2025-11-26 07:20:14.306757278 +0000 UTC m=+1515.826898897" observedRunningTime="2025-11-26 07:20:14.931077468 +0000 UTC m=+1516.451219087" watchObservedRunningTime="2025-11-26 07:20:14.939557857 +0000 UTC 
m=+1516.459699466" Nov 26 07:20:14 crc kubenswrapper[4940]: I1126 07:20:14.944433 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5q4sh" event={"ID":"569e0af8-c086-4193-b1f9-4764c62b0d80","Type":"ContainerStarted","Data":"a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098"} Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.036555 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-974pq" Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.077517 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwmzp"] Nov 26 07:20:15 crc kubenswrapper[4940]: W1126 07:20:15.082305 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod333dad53_ee05_4006_a57b_80ee2c090144.slice/crio-a1358580ec112e5bcc249eed870d01fe1caa66da921d58339a98a6e81a1ae0f7 WatchSource:0}: Error finding container a1358580ec112e5bcc249eed870d01fe1caa66da921d58339a98a6e81a1ae0f7: Status 404 returned error can't find the container with id a1358580ec112e5bcc249eed870d01fe1caa66da921d58339a98a6e81a1ae0f7 Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.575014 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dzxrb" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="registry-server" probeResult="failure" output=< Nov 26 07:20:15 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 07:20:15 crc kubenswrapper[4940]: > Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.775493 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5m2tc"] Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.777203 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.786567 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5m2tc"] Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.933721 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-catalog-content\") pod \"redhat-operators-5m2tc\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.933777 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-279wc\" (UniqueName: \"kubernetes.io/projected/6be8a53f-d253-4c4b-8e7c-87277566773d-kube-api-access-279wc\") pod \"redhat-operators-5m2tc\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.933882 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-utilities\") pod \"redhat-operators-5m2tc\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.957745 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwmzp" event={"ID":"333dad53-ee05-4006-a57b-80ee2c090144","Type":"ContainerStarted","Data":"2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299"} Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.957832 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwmzp" event={"ID":"333dad53-ee05-4006-a57b-80ee2c090144","Type":"ContainerStarted","Data":"a1358580ec112e5bcc249eed870d01fe1caa66da921d58339a98a6e81a1ae0f7"} Nov 26 07:20:15 crc kubenswrapper[4940]: I1126 07:20:15.960763 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-frx6v" event={"ID":"dfef8b09-5a70-4b1e-8287-763763cd8b99","Type":"ContainerStarted","Data":"4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32"} Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.035141 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-catalog-content\") pod \"redhat-operators-5m2tc\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.035185 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-279wc\" (UniqueName: \"kubernetes.io/projected/6be8a53f-d253-4c4b-8e7c-87277566773d-kube-api-access-279wc\") pod \"redhat-operators-5m2tc\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.035245 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-utilities\") pod \"redhat-operators-5m2tc\" (UID: 
\"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.035924 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-catalog-content\") pod \"redhat-operators-5m2tc\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.035939 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-utilities\") pod \"redhat-operators-5m2tc\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.060190 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-279wc\" (UniqueName: \"kubernetes.io/projected/6be8a53f-d253-4c4b-8e7c-87277566773d-kube-api-access-279wc\") pod \"redhat-operators-5m2tc\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.107256 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.368122 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9b495" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.368442 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9b495" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.433500 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9b495" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.608000 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5m2tc"] Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.909199 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bb2nd" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.909265 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bb2nd" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.958919 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bb2nd" Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.972524 4940 generic.go:334] "Generic (PLEG): container finished" podID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerID="a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098" exitCode=0 Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.972601 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5q4sh" event={"ID":"569e0af8-c086-4193-b1f9-4764c62b0d80","Type":"ContainerDied","Data":"a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098"} Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.974654 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m2tc" 
event={"ID":"6be8a53f-d253-4c4b-8e7c-87277566773d","Type":"ContainerStarted","Data":"05581b0e697c504b7c1893755032ef19aba4f2c48e20b4bbf889ee4ad8938120"} Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.981585 4940 generic.go:334] "Generic (PLEG): container finished" podID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerID="3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d" exitCode=0 Nov 26 07:20:16 crc kubenswrapper[4940]: I1126 07:20:16.981698 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2rwkk" event={"ID":"0eca03fd-c0fb-4900-a1de-1637a499e1cc","Type":"ContainerDied","Data":"3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d"} Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.044193 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9b495" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.062764 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bb2nd" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.191168 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r7lxz"] Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.195814 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.211025 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r7lxz"] Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.359947 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-catalog-content\") pod \"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.360023 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-utilities\") pod \"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.360054 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndxcs\" (UniqueName: \"kubernetes.io/projected/fdaa4f87-0d23-46ca-882d-eb01a4482290-kube-api-access-ndxcs\") pod \"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.461644 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-catalog-content\") pod \"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.461719 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-utilities\") pod 
\"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.461741 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndxcs\" (UniqueName: \"kubernetes.io/projected/fdaa4f87-0d23-46ca-882d-eb01a4482290-kube-api-access-ndxcs\") pod \"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.462142 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-catalog-content\") pod \"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.462312 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-utilities\") pod \"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.481706 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndxcs\" (UniqueName: \"kubernetes.io/projected/fdaa4f87-0d23-46ca-882d-eb01a4482290-kube-api-access-ndxcs\") pod \"certified-operators-r7lxz\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") " pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.521295 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.987456 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r7lxz"] Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.999767 4940 generic.go:334] "Generic (PLEG): container finished" podID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerID="e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997" exitCode=0 Nov 26 07:20:17 crc kubenswrapper[4940]: I1126 07:20:17.999886 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fh5xx" event={"ID":"29459127-09a2-47c2-b6ca-3b76342e6e04","Type":"ContainerDied","Data":"e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997"} Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.001923 4940 generic.go:334] "Generic (PLEG): container finished" podID="333dad53-ee05-4006-a57b-80ee2c090144" containerID="2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299" exitCode=0 Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.002008 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwmzp" event={"ID":"333dad53-ee05-4006-a57b-80ee2c090144","Type":"ContainerDied","Data":"2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299"} Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.010179 4940 generic.go:334] "Generic (PLEG): container finished" podID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerID="4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32" exitCode=0 Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.010258 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-frx6v" event={"ID":"dfef8b09-5a70-4b1e-8287-763763cd8b99","Type":"ContainerDied","Data":"4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32"} Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.013948 4940 generic.go:334] "Generic (PLEG): container finished" podID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerID="348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110" exitCode=0 Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.013993 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m2tc" event={"ID":"6be8a53f-d253-4c4b-8e7c-87277566773d","Type":"ContainerDied","Data":"348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110"} Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.182838 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jnr7b"] Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.184504 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.195798 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jnr7b"] Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.287694 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-catalog-content\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.287768 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-utilities\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.288318 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnfd8\" (UniqueName: \"kubernetes.io/projected/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-kube-api-access-nnfd8\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.389926 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-catalog-content\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.389990 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-utilities\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.390103 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnfd8\" (UniqueName: \"kubernetes.io/projected/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-kube-api-access-nnfd8\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.391015 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-catalog-content\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.391283 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-utilities\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.412712 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-nnfd8\" (UniqueName: \"kubernetes.io/projected/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-kube-api-access-nnfd8\") pod \"redhat-marketplace-jnr7b\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") " pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.508563 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.981782 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9xjr2"] Nov 26 07:20:18 crc kubenswrapper[4940]: I1126 07:20:18.984420 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.003155 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-utilities\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.003230 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-catalog-content\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.003335 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47jjq\" (UniqueName: \"kubernetes.io/projected/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-kube-api-access-47jjq\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.013753 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9xjr2"] Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.031796 4940 generic.go:334] "Generic (PLEG): container finished" podID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerID="0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa" exitCode=0 Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.031859 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r7lxz" event={"ID":"fdaa4f87-0d23-46ca-882d-eb01a4482290","Type":"ContainerDied","Data":"0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa"} Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.031887 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r7lxz" event={"ID":"fdaa4f87-0d23-46ca-882d-eb01a4482290","Type":"ContainerStarted","Data":"e09db8a378839a6731b9923af097af450d62e3921a1539ae496d15a0749f2ed3"} Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.065627 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-frx6v" event={"ID":"dfef8b09-5a70-4b1e-8287-763763cd8b99","Type":"ContainerStarted","Data":"4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c"} Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.085484 4940 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/certified-operators-2rwkk" event={"ID":"0eca03fd-c0fb-4900-a1de-1637a499e1cc","Type":"ContainerStarted","Data":"0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52"} Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.105893 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-utilities\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.105929 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-catalog-content\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.106619 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47jjq\" (UniqueName: \"kubernetes.io/projected/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-kube-api-access-47jjq\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.109540 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-utilities\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.109539 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-catalog-content\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.113175 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-frx6v" podStartSLOduration=3.569601128 podStartE2EDuration="8.113128823s" podCreationTimestamp="2025-11-26 07:20:11 +0000 UTC" firstStartedPulling="2025-11-26 07:20:13.875727046 +0000 UTC m=+1515.395868675" lastFinishedPulling="2025-11-26 07:20:18.419254751 +0000 UTC m=+1519.939396370" observedRunningTime="2025-11-26 07:20:19.107598348 +0000 UTC m=+1520.627739967" watchObservedRunningTime="2025-11-26 07:20:19.113128823 +0000 UTC m=+1520.633270442" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.119250 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fh5xx" event={"ID":"29459127-09a2-47c2-b6ca-3b76342e6e04","Type":"ContainerStarted","Data":"3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad"} Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.138118 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47jjq\" (UniqueName: \"kubernetes.io/projected/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-kube-api-access-47jjq\") pod \"redhat-operators-9xjr2\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 
07:20:19.143482 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5q4sh" event={"ID":"569e0af8-c086-4193-b1f9-4764c62b0d80","Type":"ContainerStarted","Data":"d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703"} Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.166832 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fh5xx" podStartSLOduration=2.5380578050000002 podStartE2EDuration="11.166808572s" podCreationTimestamp="2025-11-26 07:20:08 +0000 UTC" firstStartedPulling="2025-11-26 07:20:09.776886124 +0000 UTC m=+1511.297027743" lastFinishedPulling="2025-11-26 07:20:18.405636891 +0000 UTC m=+1519.925778510" observedRunningTime="2025-11-26 07:20:19.163303701 +0000 UTC m=+1520.683445320" watchObservedRunningTime="2025-11-26 07:20:19.166808572 +0000 UTC m=+1520.686950191" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.192461 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.211611 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5q4sh" podStartSLOduration=4.848008539 podStartE2EDuration="9.211587689s" podCreationTimestamp="2025-11-26 07:20:10 +0000 UTC" firstStartedPulling="2025-11-26 07:20:13.875219809 +0000 UTC m=+1515.395361428" lastFinishedPulling="2025-11-26 07:20:18.238798959 +0000 UTC m=+1519.758940578" observedRunningTime="2025-11-26 07:20:19.186070452 +0000 UTC m=+1520.706212081" watchObservedRunningTime="2025-11-26 07:20:19.211587689 +0000 UTC m=+1520.731729308" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.277387 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jnr7b"] Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.327899 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x54z7" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.327983 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x54z7" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.388586 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x54z7" Nov 26 07:20:19 crc kubenswrapper[4940]: I1126 07:20:19.776121 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9xjr2"] Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.164552 4940 generic.go:334] "Generic (PLEG): container finished" podID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerID="58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a" exitCode=0 Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.164871 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m2tc" event={"ID":"6be8a53f-d253-4c4b-8e7c-87277566773d","Type":"ContainerDied","Data":"58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a"} Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.191844 4940 generic.go:334] "Generic (PLEG): container finished" podID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerID="0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52" exitCode=0 Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.191938 4940 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2rwkk" event={"ID":"0eca03fd-c0fb-4900-a1de-1637a499e1cc","Type":"ContainerDied","Data":"0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52"} Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.211089 4940 generic.go:334] "Generic (PLEG): container finished" podID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerID="7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853" exitCode=0 Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.211233 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xjr2" event={"ID":"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627","Type":"ContainerDied","Data":"7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853"} Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.211285 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xjr2" event={"ID":"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627","Type":"ContainerStarted","Data":"986ea95aa97a4f35f02d125cfcededfb3f8e2c80f307e74d055477090fb71a90"} Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.219768 4940 generic.go:334] "Generic (PLEG): container finished" podID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerID="0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea" exitCode=0 Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.220622 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jnr7b" event={"ID":"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465","Type":"ContainerDied","Data":"0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea"} Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.220651 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jnr7b" event={"ID":"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465","Type":"ContainerStarted","Data":"f1134c058a135b19545145e834b43e90ea560b5eaaa3b8ea728e30792c54f787"} Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.222922 4940 generic.go:334] "Generic (PLEG): container finished" podID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerID="486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07" exitCode=0 Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.223000 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r7lxz" event={"ID":"fdaa4f87-0d23-46ca-882d-eb01a4482290","Type":"ContainerDied","Data":"486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07"} Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.228293 4940 generic.go:334] "Generic (PLEG): container finished" podID="333dad53-ee05-4006-a57b-80ee2c090144" containerID="f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a" exitCode=0 Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.228834 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwmzp" event={"ID":"333dad53-ee05-4006-a57b-80ee2c090144","Type":"ContainerDied","Data":"f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a"} Nov 26 07:20:20 crc kubenswrapper[4940]: I1126 07:20:20.330664 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x54z7" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.114949 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.115324 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.192608 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.239839 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m2tc" event={"ID":"6be8a53f-d253-4c4b-8e7c-87277566773d","Type":"ContainerStarted","Data":"00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f"} Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.242202 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2rwkk" event={"ID":"0eca03fd-c0fb-4900-a1de-1637a499e1cc","Type":"ContainerStarted","Data":"172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f"} Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.244514 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xjr2" event={"ID":"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627","Type":"ContainerStarted","Data":"f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109"} Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.246441 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jnr7b" event={"ID":"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465","Type":"ContainerStarted","Data":"5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d"} Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.248717 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r7lxz" event={"ID":"fdaa4f87-0d23-46ca-882d-eb01a4482290","Type":"ContainerStarted","Data":"1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd"} Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.251094 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwmzp" event={"ID":"333dad53-ee05-4006-a57b-80ee2c090144","Type":"ContainerStarted","Data":"997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045"} Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.267865 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5m2tc" podStartSLOduration=3.625949861 podStartE2EDuration="6.26784276s" podCreationTimestamp="2025-11-26 07:20:15 +0000 UTC" firstStartedPulling="2025-11-26 07:20:18.019850009 +0000 UTC m=+1519.539991638" lastFinishedPulling="2025-11-26 07:20:20.661742918 +0000 UTC m=+1522.181884537" observedRunningTime="2025-11-26 07:20:21.261786378 +0000 UTC m=+1522.781927997" watchObservedRunningTime="2025-11-26 07:20:21.26784276 +0000 UTC m=+1522.787984379" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.326578 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2rwkk" podStartSLOduration=4.605458493 podStartE2EDuration="8.326555138s" podCreationTimestamp="2025-11-26 07:20:13 +0000 UTC" firstStartedPulling="2025-11-26 07:20:16.983990124 +0000 UTC m=+1518.504131743" lastFinishedPulling="2025-11-26 07:20:20.705086769 +0000 UTC m=+1522.225228388" observedRunningTime="2025-11-26 07:20:21.302510398 +0000 UTC m=+1522.822652017" 
watchObservedRunningTime="2025-11-26 07:20:21.326555138 +0000 UTC m=+1522.846696757" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.355080 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ltrj6" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.361579 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fwmzp" podStartSLOduration=5.71180833 podStartE2EDuration="8.361552856s" podCreationTimestamp="2025-11-26 07:20:13 +0000 UTC" firstStartedPulling="2025-11-26 07:20:18.019963353 +0000 UTC m=+1519.540105012" lastFinishedPulling="2025-11-26 07:20:20.669707889 +0000 UTC m=+1522.189849538" observedRunningTime="2025-11-26 07:20:21.352101067 +0000 UTC m=+1522.872242676" watchObservedRunningTime="2025-11-26 07:20:21.361552856 +0000 UTC m=+1522.881694505" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.420600 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r7lxz" podStartSLOduration=2.732494896 podStartE2EDuration="4.420582335s" podCreationTimestamp="2025-11-26 07:20:17 +0000 UTC" firstStartedPulling="2025-11-26 07:20:19.050927724 +0000 UTC m=+1520.571069343" lastFinishedPulling="2025-11-26 07:20:20.739015163 +0000 UTC m=+1522.259156782" observedRunningTime="2025-11-26 07:20:21.375823848 +0000 UTC m=+1522.895965467" watchObservedRunningTime="2025-11-26 07:20:21.420582335 +0000 UTC m=+1522.940723954" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.452993 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ltrj6" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.777729 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:21 crc kubenswrapper[4940]: I1126 07:20:21.778511 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:22 crc kubenswrapper[4940]: I1126 07:20:22.820544 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-frx6v" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="registry-server" probeResult="failure" output=< Nov 26 07:20:22 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 07:20:22 crc kubenswrapper[4940]: > Nov 26 07:20:23 crc kubenswrapper[4940]: I1126 07:20:23.722471 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:23 crc kubenswrapper[4940]: I1126 07:20:23.722809 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:23 crc kubenswrapper[4940]: I1126 07:20:23.778517 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:24 crc kubenswrapper[4940]: I1126 07:20:24.313431 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:24 crc kubenswrapper[4940]: I1126 07:20:24.313478 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:24 crc kubenswrapper[4940]: I1126 07:20:24.386643 4940 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:24 crc kubenswrapper[4940]: E1126 07:20:24.428446 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod721dadd4_5c8a_4673_bcd6_d4d4ee5f1465.slice/crio-conmon-5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d.scope\": RecentStats: unable to find data in memory cache]" Nov 26 07:20:24 crc kubenswrapper[4940]: I1126 07:20:24.589747 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dzxrb" Nov 26 07:20:24 crc kubenswrapper[4940]: I1126 07:20:24.647860 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dzxrb" Nov 26 07:20:25 crc kubenswrapper[4940]: I1126 07:20:25.292900 4940 generic.go:334] "Generic (PLEG): container finished" podID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerID="f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109" exitCode=0 Nov 26 07:20:25 crc kubenswrapper[4940]: I1126 07:20:25.292956 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xjr2" event={"ID":"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627","Type":"ContainerDied","Data":"f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109"} Nov 26 07:20:25 crc kubenswrapper[4940]: I1126 07:20:25.299051 4940 generic.go:334] "Generic (PLEG): container finished" podID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerID="5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d" exitCode=0 Nov 26 07:20:25 crc kubenswrapper[4940]: I1126 07:20:25.299123 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jnr7b" event={"ID":"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465","Type":"ContainerDied","Data":"5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d"} Nov 26 07:20:26 crc kubenswrapper[4940]: I1126 07:20:26.107768 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:26 crc kubenswrapper[4940]: I1126 07:20:26.108851 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:26 crc kubenswrapper[4940]: I1126 07:20:26.311184 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jnr7b" event={"ID":"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465","Type":"ContainerStarted","Data":"85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1"} Nov 26 07:20:26 crc kubenswrapper[4940]: I1126 07:20:26.314264 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xjr2" event={"ID":"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627","Type":"ContainerStarted","Data":"3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a"} Nov 26 07:20:26 crc kubenswrapper[4940]: I1126 07:20:26.352680 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jnr7b" podStartSLOduration=2.842887141 podStartE2EDuration="8.352664989s" podCreationTimestamp="2025-11-26 07:20:18 +0000 UTC" firstStartedPulling="2025-11-26 07:20:20.221255206 +0000 UTC m=+1521.741396845" lastFinishedPulling="2025-11-26 07:20:25.731033054 +0000 UTC m=+1527.251174693" 
observedRunningTime="2025-11-26 07:20:26.3511321 +0000 UTC m=+1527.871273719" watchObservedRunningTime="2025-11-26 07:20:26.352664989 +0000 UTC m=+1527.872806608" Nov 26 07:20:26 crc kubenswrapper[4940]: I1126 07:20:26.373662 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9xjr2" podStartSLOduration=2.8287785039999997 podStartE2EDuration="8.373644713s" podCreationTimestamp="2025-11-26 07:20:18 +0000 UTC" firstStartedPulling="2025-11-26 07:20:20.216149064 +0000 UTC m=+1521.736290703" lastFinishedPulling="2025-11-26 07:20:25.761015283 +0000 UTC m=+1527.281156912" observedRunningTime="2025-11-26 07:20:26.370476433 +0000 UTC m=+1527.890618052" watchObservedRunningTime="2025-11-26 07:20:26.373644713 +0000 UTC m=+1527.893786332" Nov 26 07:20:27 crc kubenswrapper[4940]: I1126 07:20:27.163736 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5m2tc" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="registry-server" probeResult="failure" output=< Nov 26 07:20:27 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 07:20:27 crc kubenswrapper[4940]: > Nov 26 07:20:27 crc kubenswrapper[4940]: I1126 07:20:27.522360 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:27 crc kubenswrapper[4940]: I1126 07:20:27.522404 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:27 crc kubenswrapper[4940]: I1126 07:20:27.576961 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:28 crc kubenswrapper[4940]: I1126 07:20:28.391399 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r7lxz" Nov 26 07:20:28 crc kubenswrapper[4940]: I1126 07:20:28.509459 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:28 crc kubenswrapper[4940]: I1126 07:20:28.509517 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:28 crc kubenswrapper[4940]: I1126 07:20:28.561651 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jnr7b" Nov 26 07:20:28 crc kubenswrapper[4940]: I1126 07:20:28.725737 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fh5xx" Nov 26 07:20:28 crc kubenswrapper[4940]: I1126 07:20:28.725782 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fh5xx" Nov 26 07:20:28 crc kubenswrapper[4940]: I1126 07:20:28.775377 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fh5xx" Nov 26 07:20:29 crc kubenswrapper[4940]: I1126 07:20:29.194030 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:29 crc kubenswrapper[4940]: I1126 07:20:29.194377 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:29 crc kubenswrapper[4940]: I1126 07:20:29.398095 4940 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fh5xx" Nov 26 07:20:30 crc kubenswrapper[4940]: I1126 07:20:30.310967 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9xjr2" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="registry-server" probeResult="failure" output=< Nov 26 07:20:30 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 07:20:30 crc kubenswrapper[4940]: > Nov 26 07:20:31 crc kubenswrapper[4940]: I1126 07:20:31.177192 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:31 crc kubenswrapper[4940]: I1126 07:20:31.840666 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:31 crc kubenswrapper[4940]: I1126 07:20:31.898953 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:33 crc kubenswrapper[4940]: I1126 07:20:33.788243 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:34 crc kubenswrapper[4940]: I1126 07:20:34.384973 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:36 crc kubenswrapper[4940]: I1126 07:20:36.178222 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:36 crc kubenswrapper[4940]: I1126 07:20:36.254466 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:36 crc kubenswrapper[4940]: I1126 07:20:36.772595 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2rwkk"] Nov 26 07:20:36 crc kubenswrapper[4940]: I1126 07:20:36.772923 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2rwkk" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerName="registry-server" containerID="cri-o://172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f" gracePeriod=2 Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.186399 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.366181 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-catalog-content\") pod \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.366313 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6tfb\" (UniqueName: \"kubernetes.io/projected/0eca03fd-c0fb-4900-a1de-1637a499e1cc-kube-api-access-z6tfb\") pod \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.366515 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-utilities\") pod \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\" (UID: \"0eca03fd-c0fb-4900-a1de-1637a499e1cc\") " Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.368291 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-utilities" (OuterVolumeSpecName: "utilities") pod "0eca03fd-c0fb-4900-a1de-1637a499e1cc" (UID: "0eca03fd-c0fb-4900-a1de-1637a499e1cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.379163 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eca03fd-c0fb-4900-a1de-1637a499e1cc-kube-api-access-z6tfb" (OuterVolumeSpecName: "kube-api-access-z6tfb") pod "0eca03fd-c0fb-4900-a1de-1637a499e1cc" (UID: "0eca03fd-c0fb-4900-a1de-1637a499e1cc"). InnerVolumeSpecName "kube-api-access-z6tfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.380698 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-92mvv"] Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.381191 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-92mvv" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="registry-server" containerID="cri-o://b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691" gracePeriod=2 Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.419801 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0eca03fd-c0fb-4900-a1de-1637a499e1cc" (UID: "0eca03fd-c0fb-4900-a1de-1637a499e1cc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.437438 4940 generic.go:334] "Generic (PLEG): container finished" podID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerID="172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f" exitCode=0 Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.437476 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2rwkk" event={"ID":"0eca03fd-c0fb-4900-a1de-1637a499e1cc","Type":"ContainerDied","Data":"172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f"} Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.437502 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2rwkk" event={"ID":"0eca03fd-c0fb-4900-a1de-1637a499e1cc","Type":"ContainerDied","Data":"1591f8451add3a7e282016be9bd58ad1aa1e883af29148efe9d999e6c740dfc8"} Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.437517 4940 scope.go:117] "RemoveContainer" containerID="172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.437625 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2rwkk" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.468414 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.468471 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6tfb\" (UniqueName: \"kubernetes.io/projected/0eca03fd-c0fb-4900-a1de-1637a499e1cc-kube-api-access-z6tfb\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.468493 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0eca03fd-c0fb-4900-a1de-1637a499e1cc-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.500884 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2rwkk"] Nov 26 07:20:37 crc kubenswrapper[4940]: E1126 07:20:37.505338 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691 is running failed: container process not found" containerID="b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.505437 4940 scope.go:117] "RemoveContainer" containerID="0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52" Nov 26 07:20:37 crc kubenswrapper[4940]: E1126 07:20:37.505874 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691 is running failed: container process not found" containerID="b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 07:20:37 crc kubenswrapper[4940]: E1126 07:20:37.506550 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = 
container is not created or running: checking if PID of b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691 is running failed: container process not found" containerID="b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 07:20:37 crc kubenswrapper[4940]: E1126 07:20:37.506610 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-92mvv" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="registry-server" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.507623 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2rwkk"] Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.551348 4940 scope.go:117] "RemoveContainer" containerID="3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.577273 4940 scope.go:117] "RemoveContainer" containerID="172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f" Nov 26 07:20:37 crc kubenswrapper[4940]: E1126 07:20:37.579460 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f\": container with ID starting with 172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f not found: ID does not exist" containerID="172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.579527 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f"} err="failed to get container status \"172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f\": rpc error: code = NotFound desc = could not find container \"172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f\": container with ID starting with 172e118cff53a3491c39f0f2e200e5a3506204c1e84b383ae772b328fac9374f not found: ID does not exist" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.579567 4940 scope.go:117] "RemoveContainer" containerID="0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52" Nov 26 07:20:37 crc kubenswrapper[4940]: E1126 07:20:37.579970 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52\": container with ID starting with 0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52 not found: ID does not exist" containerID="0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.580011 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52"} err="failed to get container status \"0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52\": rpc error: code = NotFound desc = could not find container \"0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52\": container with ID starting with 0fd198adbf350996427bdc1a4499a9ca582b155faa447cf81ff539dfce9b9e52 not 
found: ID does not exist" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.580109 4940 scope.go:117] "RemoveContainer" containerID="3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d" Nov 26 07:20:37 crc kubenswrapper[4940]: E1126 07:20:37.580359 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d\": container with ID starting with 3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d not found: ID does not exist" containerID="3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.580391 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d"} err="failed to get container status \"3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d\": rpc error: code = NotFound desc = could not find container \"3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d\": container with ID starting with 3a9c15db74d96f87fa4d182055c250772ed355659d531186c92c39e9546a516d not found: ID does not exist" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.831511 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.969125 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w7c4d"] Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.969414 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w7c4d" podUID="907d2835-1aee-4b5a-9726-a75946007030" containerName="registry-server" containerID="cri-o://4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243" gracePeriod=2 Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.975646 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-utilities\") pod \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.975732 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-catalog-content\") pod \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.975869 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9q62\" (UniqueName: \"kubernetes.io/projected/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-kube-api-access-p9q62\") pod \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\" (UID: \"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4\") " Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.976488 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-utilities" (OuterVolumeSpecName: "utilities") pod "9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" (UID: "9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:37 crc kubenswrapper[4940]: I1126 07:20:37.982239 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-kube-api-access-p9q62" (OuterVolumeSpecName: "kube-api-access-p9q62") pod "9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" (UID: "9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4"). InnerVolumeSpecName "kube-api-access-p9q62". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.032816 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" (UID: "9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.083132 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.083176 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.083194 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9q62\" (UniqueName: \"kubernetes.io/projected/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4-kube-api-access-p9q62\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.380669 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.448331 4940 generic.go:334] "Generic (PLEG): container finished" podID="907d2835-1aee-4b5a-9726-a75946007030" containerID="4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243" exitCode=0 Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.448377 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7c4d" event={"ID":"907d2835-1aee-4b5a-9726-a75946007030","Type":"ContainerDied","Data":"4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243"} Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.449004 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w7c4d" event={"ID":"907d2835-1aee-4b5a-9726-a75946007030","Type":"ContainerDied","Data":"2e502d94231d66fc93f79568084694b5ce03be2bdc03742bfcecd3f385e65b72"} Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.448412 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w7c4d" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.449033 4940 scope.go:117] "RemoveContainer" containerID="4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.454122 4940 generic.go:334] "Generic (PLEG): container finished" podID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerID="b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691" exitCode=0 Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.454166 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92mvv" event={"ID":"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4","Type":"ContainerDied","Data":"b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691"} Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.454181 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92mvv" event={"ID":"9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4","Type":"ContainerDied","Data":"448f4d428ba8487d7616598fb8078dea3e75a054e5b56dc7420b4398d91bfbc3"} Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.454193 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92mvv" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.485018 4940 scope.go:117] "RemoveContainer" containerID="7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.488070 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-catalog-content\") pod \"907d2835-1aee-4b5a-9726-a75946007030\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.488175 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-utilities\") pod \"907d2835-1aee-4b5a-9726-a75946007030\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.488215 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6b6zb\" (UniqueName: \"kubernetes.io/projected/907d2835-1aee-4b5a-9726-a75946007030-kube-api-access-6b6zb\") pod \"907d2835-1aee-4b5a-9726-a75946007030\" (UID: \"907d2835-1aee-4b5a-9726-a75946007030\") " Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.489183 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-utilities" (OuterVolumeSpecName: "utilities") pod "907d2835-1aee-4b5a-9726-a75946007030" (UID: "907d2835-1aee-4b5a-9726-a75946007030"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.491897 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/907d2835-1aee-4b5a-9726-a75946007030-kube-api-access-6b6zb" (OuterVolumeSpecName: "kube-api-access-6b6zb") pod "907d2835-1aee-4b5a-9726-a75946007030" (UID: "907d2835-1aee-4b5a-9726-a75946007030"). InnerVolumeSpecName "kube-api-access-6b6zb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.491952 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-92mvv"] Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.499295 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-92mvv"] Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.510856 4940 scope.go:117] "RemoveContainer" containerID="99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.526175 4940 scope.go:117] "RemoveContainer" containerID="4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243" Nov 26 07:20:38 crc kubenswrapper[4940]: E1126 07:20:38.526669 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243\": container with ID starting with 4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243 not found: ID does not exist" containerID="4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.526707 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243"} err="failed to get container status \"4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243\": rpc error: code = NotFound desc = could not find container \"4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243\": container with ID starting with 4cb4feed74707f79c26f528e5cd17de204e41ad521b85ba6c0c1526b31d24243 not found: ID does not exist" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.526733 4940 scope.go:117] "RemoveContainer" containerID="7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a" Nov 26 07:20:38 crc kubenswrapper[4940]: E1126 07:20:38.526940 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a\": container with ID starting with 7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a not found: ID does not exist" containerID="7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.526967 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a"} err="failed to get container status \"7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a\": rpc error: code = NotFound desc = could not find container \"7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a\": container with ID starting with 7c1f7363cf7d6645092679aa8f2352bfd37c2d6e86afdc08fc2b484f7288ae1a not found: ID does not exist" Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.526985 4940 scope.go:117] "RemoveContainer" containerID="99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472" Nov 26 07:20:38 crc kubenswrapper[4940]: E1126 07:20:38.527263 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472\": container with ID starting with 
99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472 not found: ID does not exist" containerID="99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.527289 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472"} err="failed to get container status \"99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472\": rpc error: code = NotFound desc = could not find container \"99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472\": container with ID starting with 99efa4e05a63c6cb05ce55a68ca38c447f8f75c4fb34b43e27121244d4b80472 not found: ID does not exist"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.527306 4940 scope.go:117] "RemoveContainer" containerID="b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.538667 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "907d2835-1aee-4b5a-9726-a75946007030" (UID: "907d2835-1aee-4b5a-9726-a75946007030"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.546230 4940 scope.go:117] "RemoveContainer" containerID="2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.572842 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x54z7"]
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.573138 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x54z7" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerName="registry-server" containerID="cri-o://534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d" gracePeriod=2
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.577904 4940 scope.go:117] "RemoveContainer" containerID="917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.578236 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jnr7b"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.589736 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6b6zb\" (UniqueName: \"kubernetes.io/projected/907d2835-1aee-4b5a-9726-a75946007030-kube-api-access-6b6zb\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.589764 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.589773 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/907d2835-1aee-4b5a-9726-a75946007030-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.605176 4940 scope.go:117] "RemoveContainer" containerID="b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691"
Nov 26 07:20:38 crc kubenswrapper[4940]: E1126 07:20:38.605699 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691\": container with ID starting with b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691 not found: ID does not exist" containerID="b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.605771 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691"} err="failed to get container status \"b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691\": rpc error: code = NotFound desc = could not find container \"b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691\": container with ID starting with b9df90440d7de1e4065efd5303623abfb964b634a9886112b859fa722f13a691 not found: ID does not exist"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.605804 4940 scope.go:117] "RemoveContainer" containerID="2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2"
Nov 26 07:20:38 crc kubenswrapper[4940]: E1126 07:20:38.606199 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2\": container with ID starting with 2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2 not found: ID does not exist" containerID="2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.606238 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2"} err="failed to get container status \"2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2\": rpc error: code = NotFound desc = could not find container \"2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2\": container with ID starting with 2e0f17f2e893547e1c4106927243802af489b9bb3defddfa30824ef7318926d2 not found: ID does not exist"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.606267 4940 scope.go:117] "RemoveContainer" containerID="917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0"
Nov 26 07:20:38 crc kubenswrapper[4940]: E1126 07:20:38.606608 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0\": container with ID starting with 917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0 not found: ID does not exist" containerID="917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.606641 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0"} err="failed to get container status \"917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0\": rpc error: code = NotFound desc = could not find container \"917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0\": container with ID starting with 917c76e14ad1b223d63e5b5c0bcb975561f634224822c4f7ae741c7fc1a53bd0 not found: ID does not exist"
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.829791 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w7c4d"]
Nov 26 07:20:38 crc kubenswrapper[4940]: I1126 07:20:38.836663 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w7c4d"]
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.045610 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.174300 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" path="/var/lib/kubelet/pods/0eca03fd-c0fb-4900-a1de-1637a499e1cc/volumes"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.174894 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="907d2835-1aee-4b5a-9726-a75946007030" path="/var/lib/kubelet/pods/907d2835-1aee-4b5a-9726-a75946007030/volumes"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.175488 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" path="/var/lib/kubelet/pods/9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4/volumes"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.179025 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x9njh"]
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.179268 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x9njh" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerName="registry-server" containerID="cri-o://cb5f4ae9fa71b84665b254972ddd625f3fbd6feb149491959522a8083b949140" gracePeriod=2
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.233085 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-catalog-content\") pod \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") "
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.233144 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-utilities\") pod \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") "
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.233181 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qrkk\" (UniqueName: \"kubernetes.io/projected/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-kube-api-access-2qrkk\") pod \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\" (UID: \"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f\") "
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.235308 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-utilities" (OuterVolumeSpecName: "utilities") pod "0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" (UID: "0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.236533 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-kube-api-access-2qrkk" (OuterVolumeSpecName: "kube-api-access-2qrkk") pod "0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" (UID: "0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f"). InnerVolumeSpecName "kube-api-access-2qrkk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.249235 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9xjr2"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.297275 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" (UID: "0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.305780 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9xjr2"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.335298 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.335545 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.335676 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qrkk\" (UniqueName: \"kubernetes.io/projected/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f-kube-api-access-2qrkk\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.466023 4940 generic.go:334] "Generic (PLEG): container finished" podID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerID="cb5f4ae9fa71b84665b254972ddd625f3fbd6feb149491959522a8083b949140" exitCode=0
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.466124 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9njh" event={"ID":"1743d256-8adb-4a95-a1d3-ec29d932191f","Type":"ContainerDied","Data":"cb5f4ae9fa71b84665b254972ddd625f3fbd6feb149491959522a8083b949140"}
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.471296 4940 generic.go:334] "Generic (PLEG): container finished" podID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerID="534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d" exitCode=0
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.471504 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x54z7"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.472323 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x54z7" event={"ID":"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f","Type":"ContainerDied","Data":"534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d"}
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.472365 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x54z7" event={"ID":"0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f","Type":"ContainerDied","Data":"6c0ea66e149af2addaf4dd70a43b9e04ab89693ce8d6d55bd10317f82f1bf92e"}
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.472382 4940 scope.go:117] "RemoveContainer" containerID="534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.506415 4940 scope.go:117] "RemoveContainer" containerID="dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.514392 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x54z7"]
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.522884 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x54z7"]
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.532638 4940 scope.go:117] "RemoveContainer" containerID="b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.583871 4940 scope.go:117] "RemoveContainer" containerID="534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d"
Nov 26 07:20:39 crc kubenswrapper[4940]: E1126 07:20:39.584736 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d\": container with ID starting with 534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d not found: ID does not exist" containerID="534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.584793 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d"} err="failed to get container status \"534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d\": rpc error: code = NotFound desc = could not find container \"534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d\": container with ID starting with 534db079ebfcb254d3b5e0e62378051aab15cceb5be3ea97a98fa37347d9f10d not found: ID does not exist"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.584835 4940 scope.go:117] "RemoveContainer" containerID="dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef"
Nov 26 07:20:39 crc kubenswrapper[4940]: E1126 07:20:39.585281 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef\": container with ID starting with dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef not found: ID does not exist" containerID="dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.585338 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef"} err="failed to get container status \"dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef\": rpc error: code = NotFound desc = could not find container \"dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef\": container with ID starting with dc85c64c8ce31aeca848c0ac051e60c28874132c40419708fe2860bccbd3b4ef not found: ID does not exist"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.585382 4940 scope.go:117] "RemoveContainer" containerID="b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3"
Nov 26 07:20:39 crc kubenswrapper[4940]: E1126 07:20:39.585775 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3\": container with ID starting with b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3 not found: ID does not exist" containerID="b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.585800 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3"} err="failed to get container status \"b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3\": rpc error: code = NotFound desc = could not find container \"b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3\": container with ID starting with b39506fb8e5f4b4f29c5ffb156e6a6e3d0caca95d0d0faf6a7b4b8dcfa0503e3 not found: ID does not exist"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.599968 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9njh"
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.741171 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-utilities\") pod \"1743d256-8adb-4a95-a1d3-ec29d932191f\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") "
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.741332 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-catalog-content\") pod \"1743d256-8adb-4a95-a1d3-ec29d932191f\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") "
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.741368 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjk77\" (UniqueName: \"kubernetes.io/projected/1743d256-8adb-4a95-a1d3-ec29d932191f-kube-api-access-sjk77\") pod \"1743d256-8adb-4a95-a1d3-ec29d932191f\" (UID: \"1743d256-8adb-4a95-a1d3-ec29d932191f\") "
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.742175 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-utilities" (OuterVolumeSpecName: "utilities") pod "1743d256-8adb-4a95-a1d3-ec29d932191f" (UID: "1743d256-8adb-4a95-a1d3-ec29d932191f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.747092 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1743d256-8adb-4a95-a1d3-ec29d932191f-kube-api-access-sjk77" (OuterVolumeSpecName: "kube-api-access-sjk77") pod "1743d256-8adb-4a95-a1d3-ec29d932191f" (UID: "1743d256-8adb-4a95-a1d3-ec29d932191f"). InnerVolumeSpecName "kube-api-access-sjk77". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.771869 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fc97g"]
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.772157 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fc97g" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerName="registry-server" containerID="cri-o://e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9" gracePeriod=2
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.788643 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1743d256-8adb-4a95-a1d3-ec29d932191f" (UID: "1743d256-8adb-4a95-a1d3-ec29d932191f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.843058 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.843093 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjk77\" (UniqueName: \"kubernetes.io/projected/1743d256-8adb-4a95-a1d3-ec29d932191f-kube-api-access-sjk77\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:39 crc kubenswrapper[4940]: I1126 07:20:39.843104 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1743d256-8adb-4a95-a1d3-ec29d932191f-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.199434 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fc97g"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.351267 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-utilities\") pod \"bab18c4f-0062-48f3-b320-3e503c35d5b2\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") "
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.351340 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-catalog-content\") pod \"bab18c4f-0062-48f3-b320-3e503c35d5b2\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") "
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.351561 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ml7nn\" (UniqueName: \"kubernetes.io/projected/bab18c4f-0062-48f3-b320-3e503c35d5b2-kube-api-access-ml7nn\") pod \"bab18c4f-0062-48f3-b320-3e503c35d5b2\" (UID: \"bab18c4f-0062-48f3-b320-3e503c35d5b2\") "
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.352667 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-utilities" (OuterVolumeSpecName: "utilities") pod "bab18c4f-0062-48f3-b320-3e503c35d5b2" (UID: "bab18c4f-0062-48f3-b320-3e503c35d5b2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.353360 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.362302 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bab18c4f-0062-48f3-b320-3e503c35d5b2-kube-api-access-ml7nn" (OuterVolumeSpecName: "kube-api-access-ml7nn") pod "bab18c4f-0062-48f3-b320-3e503c35d5b2" (UID: "bab18c4f-0062-48f3-b320-3e503c35d5b2"). InnerVolumeSpecName "kube-api-access-ml7nn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.370718 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9b495"]
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.371063 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9b495" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerName="registry-server" containerID="cri-o://fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a" gracePeriod=2
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.405783 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bab18c4f-0062-48f3-b320-3e503c35d5b2" (UID: "bab18c4f-0062-48f3-b320-3e503c35d5b2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.454281 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bab18c4f-0062-48f3-b320-3e503c35d5b2-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.454318 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ml7nn\" (UniqueName: \"kubernetes.io/projected/bab18c4f-0062-48f3-b320-3e503c35d5b2-kube-api-access-ml7nn\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.506328 4940 generic.go:334] "Generic (PLEG): container finished" podID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerID="e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9" exitCode=0
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.507334 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc97g" event={"ID":"bab18c4f-0062-48f3-b320-3e503c35d5b2","Type":"ContainerDied","Data":"e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9"}
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.507363 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fc97g"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.507509 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc97g" event={"ID":"bab18c4f-0062-48f3-b320-3e503c35d5b2","Type":"ContainerDied","Data":"7dbcb11df99e47df550725d5874b4903cad211feb79773a1adf6d5ee9d12e9c2"}
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.507528 4940 scope.go:117] "RemoveContainer" containerID="e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.517772 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x9njh"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.517777 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x9njh" event={"ID":"1743d256-8adb-4a95-a1d3-ec29d932191f","Type":"ContainerDied","Data":"e290ec54c3660004e8e177a7832f3dbbd8010f0e5f5f03818b585efe6e5c262c"}
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.578008 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fc97g"]
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.589289 4940 scope.go:117] "RemoveContainer" containerID="c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.593098 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fc97g"]
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.600508 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x9njh"]
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.606617 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x9njh"]
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.612469 4940 scope.go:117] "RemoveContainer" containerID="a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.637853 4940 scope.go:117] "RemoveContainer" containerID="e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9"
Nov 26 07:20:40 crc kubenswrapper[4940]: E1126 07:20:40.638420 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9\": container with ID starting with e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9 not found: ID does not exist" containerID="e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.638617 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9"} err="failed to get container status \"e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9\": rpc error: code = NotFound desc = could not find container \"e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9\": container with ID starting with e1c34592a0c463e00ffdb328dcd1c073d84bef71054b5e2d00e74074cbcdb5f9 not found: ID does not exist"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.638707 4940 scope.go:117] "RemoveContainer" containerID="c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e"
Nov 26 07:20:40 crc kubenswrapper[4940]: E1126 07:20:40.639139 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e\": container with ID starting with c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e not found: ID does not exist" containerID="c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.639181 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e"} err="failed to get container status \"c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e\": rpc error: code = NotFound desc = could not find container \"c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e\": container with ID starting with c5a4fc8810ba8c4bded9168ba20f6395200d9d4f38a18fd00f7e30ea599d378e not found: ID does not exist"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.639214 4940 scope.go:117] "RemoveContainer" containerID="a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e"
Nov 26 07:20:40 crc kubenswrapper[4940]: E1126 07:20:40.639528 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e\": container with ID starting with a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e not found: ID does not exist" containerID="a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.639556 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e"} err="failed to get container status \"a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e\": rpc error: code = NotFound desc = could not find container \"a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e\": container with ID starting with a9a9518907e829670ab173ae80e13b14a099e80b34e47cb94283a1407df49d7e not found: ID does not exist"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.639572 4940 scope.go:117] "RemoveContainer" containerID="cb5f4ae9fa71b84665b254972ddd625f3fbd6feb149491959522a8083b949140"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.656726 4940 scope.go:117] "RemoveContainer" containerID="195facecc86ce7c93440984f09b505bd3f7ecc04da195f45398f7c635f485a87"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.685223 4940 scope.go:117] "RemoveContainer" containerID="e3236911323995ddc6b5c3e70920f6c7439aab9fdc17e9ac7d19f1e9ac374e23"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.784981 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.960929 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-utilities\") pod \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") "
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.961013 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4d6l\" (UniqueName: \"kubernetes.io/projected/d9a480fc-1e8a-4f40-8add-a75b75641f4e-kube-api-access-p4d6l\") pod \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") "
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.961097 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-catalog-content\") pod \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\" (UID: \"d9a480fc-1e8a-4f40-8add-a75b75641f4e\") "
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.961998 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-utilities" (OuterVolumeSpecName: "utilities") pod "d9a480fc-1e8a-4f40-8add-a75b75641f4e" (UID: "d9a480fc-1e8a-4f40-8add-a75b75641f4e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.971922 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g2sqf"]
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.972310 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-g2sqf" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerName="registry-server" containerID="cri-o://977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14" gracePeriod=2
Nov 26 07:20:40 crc kubenswrapper[4940]: I1126 07:20:40.972319 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9a480fc-1e8a-4f40-8add-a75b75641f4e-kube-api-access-p4d6l" (OuterVolumeSpecName: "kube-api-access-p4d6l") pod "d9a480fc-1e8a-4f40-8add-a75b75641f4e" (UID: "d9a480fc-1e8a-4f40-8add-a75b75641f4e"). InnerVolumeSpecName "kube-api-access-p4d6l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.012575 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d9a480fc-1e8a-4f40-8add-a75b75641f4e" (UID: "d9a480fc-1e8a-4f40-8add-a75b75641f4e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.063104 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.063633 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9a480fc-1e8a-4f40-8add-a75b75641f4e-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.063770 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4d6l\" (UniqueName: \"kubernetes.io/projected/d9a480fc-1e8a-4f40-8add-a75b75641f4e-kube-api-access-p4d6l\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.178116 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" path="/var/lib/kubelet/pods/0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f/volumes"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.178721 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" path="/var/lib/kubelet/pods/1743d256-8adb-4a95-a1d3-ec29d932191f/volumes"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.179388 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" path="/var/lib/kubelet/pods/bab18c4f-0062-48f3-b320-3e503c35d5b2/volumes"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.375402 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wsh9l"]
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.375613 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wsh9l" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerName="registry-server" containerID="cri-o://0e2f214b56a4574e98fcb309e09713a98d62f7a8f14acf0f28bde2d659fe7c0e" gracePeriod=2
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.513463 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.536223 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerID="0e2f214b56a4574e98fcb309e09713a98d62f7a8f14acf0f28bde2d659fe7c0e" exitCode=0
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.536280 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wsh9l" event={"ID":"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797","Type":"ContainerDied","Data":"0e2f214b56a4574e98fcb309e09713a98d62f7a8f14acf0f28bde2d659fe7c0e"}
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.554191 4940 generic.go:334] "Generic (PLEG): container finished" podID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerID="977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14" exitCode=0
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.554252 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g2sqf" event={"ID":"3c5158dd-e8a1-48fa-a742-32c73dafd95b","Type":"ContainerDied","Data":"977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14"}
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.554280 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g2sqf" event={"ID":"3c5158dd-e8a1-48fa-a742-32c73dafd95b","Type":"ContainerDied","Data":"1be4b75a524534064ec6eba547d04d1bc1bd0c611222ad01925d32ab990a9d09"}
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.554298 4940 scope.go:117] "RemoveContainer" containerID="977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.554396 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g2sqf"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.591753 4940 generic.go:334] "Generic (PLEG): container finished" podID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerID="fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a" exitCode=0
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.591832 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9b495"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.591833 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b495" event={"ID":"d9a480fc-1e8a-4f40-8add-a75b75641f4e","Type":"ContainerDied","Data":"fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a"}
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.591954 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9b495" event={"ID":"d9a480fc-1e8a-4f40-8add-a75b75641f4e","Type":"ContainerDied","Data":"274bb19b3e3f055fcea216b3aa9c43890c582ef64c21b073e7d781d18a127332"}
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.607303 4940 scope.go:117] "RemoveContainer" containerID="6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.618052 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9b495"]
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.626433 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9b495"]
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.640881 4940 scope.go:117] "RemoveContainer" containerID="0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.666218 4940 scope.go:117] "RemoveContainer" containerID="977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14"
Nov 26 07:20:41 crc kubenswrapper[4940]: E1126 07:20:41.666943 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14\": container with ID starting with 977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14 not found: ID does not exist" containerID="977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.666973 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14"} err="failed to get container status \"977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14\": rpc error: code = NotFound desc = could not find container \"977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14\": container with ID starting with 977a23acba391525bfce7fd843b3d3357ee40cda8972c4fdd9d2a6dfab8c4c14 not found: ID does not exist"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.666996 4940 scope.go:117] "RemoveContainer" containerID="6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9"
Nov 26 07:20:41 crc kubenswrapper[4940]: E1126 07:20:41.667351 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9\": container with ID starting with 6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9 not found: ID does not exist" containerID="6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.667376 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9"} err="failed to get container status \"6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9\": rpc error: code = NotFound desc = could not find container \"6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9\": container with ID starting with 6a04ca476fc918fbee3c1f65e232c4b1537ce0811c830c58ae7da0f4462a73f9 not found: ID does not exist"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.667390 4940 scope.go:117] "RemoveContainer" containerID="0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8"
Nov 26 07:20:41 crc kubenswrapper[4940]: E1126 07:20:41.667695 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8\": container with ID starting with 0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8 not found: ID does not exist" containerID="0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.667713 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8"} err="failed to get container status \"0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8\": rpc error: code = NotFound desc = could not find container \"0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8\": container with ID starting with 0b6ae58627a34c0f5e5a9b19b0a04cc3582ff95430d372407361e0cc6b41f0b8 not found: ID does not exist"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.667726 4940 scope.go:117] "RemoveContainer" containerID="fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.679717 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsv6\" (UniqueName: \"kubernetes.io/projected/3c5158dd-e8a1-48fa-a742-32c73dafd95b-kube-api-access-dbsv6\") pod \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") "
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.679782 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-utilities\") pod \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") "
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.679967 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-catalog-content\") pod \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\" (UID: \"3c5158dd-e8a1-48fa-a742-32c73dafd95b\") "
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.680887 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-utilities" (OuterVolumeSpecName: "utilities") pod "3c5158dd-e8a1-48fa-a742-32c73dafd95b" (UID: "3c5158dd-e8a1-48fa-a742-32c73dafd95b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.709695 4940 scope.go:117] "RemoveContainer" containerID="dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.709906 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c5158dd-e8a1-48fa-a742-32c73dafd95b-kube-api-access-dbsv6" (OuterVolumeSpecName: "kube-api-access-dbsv6") pod "3c5158dd-e8a1-48fa-a742-32c73dafd95b" (UID: "3c5158dd-e8a1-48fa-a742-32c73dafd95b"). InnerVolumeSpecName "kube-api-access-dbsv6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.781273 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r7lxz"]
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.781518 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r7lxz" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerName="registry-server" containerID="cri-o://1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd" gracePeriod=2
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.781847 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsv6\" (UniqueName: \"kubernetes.io/projected/3c5158dd-e8a1-48fa-a742-32c73dafd95b-kube-api-access-dbsv6\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.781871 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.787510 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3c5158dd-e8a1-48fa-a742-32c73dafd95b" (UID: "3c5158dd-e8a1-48fa-a742-32c73dafd95b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.796397 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wsh9l"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.816930 4940 scope.go:117] "RemoveContainer" containerID="c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.849831 4940 scope.go:117] "RemoveContainer" containerID="fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a"
Nov 26 07:20:41 crc kubenswrapper[4940]: E1126 07:20:41.850268 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a\": container with ID starting with fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a not found: ID does not exist" containerID="fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.850292 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a"} err="failed to get container status \"fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a\": rpc error: code = NotFound desc = could not find container \"fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a\": container with ID starting with fbbe0567ab901cf0e14a0bfad1bf75d2ce0f42635ae8deb50cbf75927e76f14a not found: ID does not exist"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.850311 4940 scope.go:117] "RemoveContainer" containerID="dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b"
Nov 26 07:20:41 crc kubenswrapper[4940]: E1126 07:20:41.850659 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b\": container with ID starting with dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b not found: ID does not exist" containerID="dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.850676 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b"} err="failed to get container status \"dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b\": rpc error: code = NotFound desc = could not find container \"dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b\": container with ID starting with dd5bb47e67dbd19f30d38c12797f16f42333ee11aab0800a49529a49cca5682b not found: ID does not exist"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.850687 4940 scope.go:117] "RemoveContainer" containerID="c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2"
Nov 26 07:20:41 crc kubenswrapper[4940]: E1126 07:20:41.850894 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2\": container with ID starting with c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2 not found: ID does not exist" containerID="c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.850921 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2"} err="failed to get container status \"c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2\": rpc error: code = NotFound desc = could not find container \"c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2\": container with ID starting with c3e6e86a9be222d469b46cef9cd1c05c0949e5ae49f243d9c45d7158df6c1ec2 not found: ID does not exist"
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.882139 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-utilities\") pod \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") "
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.882263 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cm8q\" (UniqueName: \"kubernetes.io/projected/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-kube-api-access-8cm8q\") pod \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") "
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.882285 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-catalog-content\") pod \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\" (UID: \"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797\") "
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.882466 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c5158dd-e8a1-48fa-a742-32c73dafd95b-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.883029 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-utilities" (OuterVolumeSpecName: "utilities") pod "1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" (UID: "1ee6f9b2-a68f-4ff0-bc6c-48f12275e797"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.885110 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-kube-api-access-8cm8q" (OuterVolumeSpecName: "kube-api-access-8cm8q") pod "1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" (UID: "1ee6f9b2-a68f-4ff0-bc6c-48f12275e797"). InnerVolumeSpecName "kube-api-access-8cm8q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.934353 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" (UID: "1ee6f9b2-a68f-4ff0-bc6c-48f12275e797"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.980603 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g2sqf"]
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.983990 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cm8q\" (UniqueName: \"kubernetes.io/projected/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-kube-api-access-8cm8q\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.984036 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.984065 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:41 crc kubenswrapper[4940]: I1126 07:20:41.994275 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-g2sqf"]
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.259221 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r7lxz"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.288436 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-utilities\") pod \"fdaa4f87-0d23-46ca-882d-eb01a4482290\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") "
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.288500 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-catalog-content\") pod \"fdaa4f87-0d23-46ca-882d-eb01a4482290\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") "
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.288573 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndxcs\" (UniqueName: \"kubernetes.io/projected/fdaa4f87-0d23-46ca-882d-eb01a4482290-kube-api-access-ndxcs\") pod \"fdaa4f87-0d23-46ca-882d-eb01a4482290\" (UID: \"fdaa4f87-0d23-46ca-882d-eb01a4482290\") "
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.289419 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-utilities" (OuterVolumeSpecName: "utilities") pod "fdaa4f87-0d23-46ca-882d-eb01a4482290" (UID: "fdaa4f87-0d23-46ca-882d-eb01a4482290"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.296233 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdaa4f87-0d23-46ca-882d-eb01a4482290-kube-api-access-ndxcs" (OuterVolumeSpecName: "kube-api-access-ndxcs") pod "fdaa4f87-0d23-46ca-882d-eb01a4482290" (UID: "fdaa4f87-0d23-46ca-882d-eb01a4482290"). InnerVolumeSpecName "kube-api-access-ndxcs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.338270 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fdaa4f87-0d23-46ca-882d-eb01a4482290" (UID: "fdaa4f87-0d23-46ca-882d-eb01a4482290"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.389664 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.389696 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdaa4f87-0d23-46ca-882d-eb01a4482290-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.389709 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndxcs\" (UniqueName: \"kubernetes.io/projected/fdaa4f87-0d23-46ca-882d-eb01a4482290-kube-api-access-ndxcs\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.608630 4940 generic.go:334] "Generic (PLEG): container finished" podID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerID="1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd" exitCode=0
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.608683 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r7lxz" event={"ID":"fdaa4f87-0d23-46ca-882d-eb01a4482290","Type":"ContainerDied","Data":"1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd"}
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.608705 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r7lxz" event={"ID":"fdaa4f87-0d23-46ca-882d-eb01a4482290","Type":"ContainerDied","Data":"e09db8a378839a6731b9923af097af450d62e3921a1539ae496d15a0749f2ed3"}
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.608722 4940 scope.go:117] "RemoveContainer" containerID="1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.608821 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r7lxz"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.614867 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wsh9l" event={"ID":"1ee6f9b2-a68f-4ff0-bc6c-48f12275e797","Type":"ContainerDied","Data":"c117eabac4d8806f0df3151fbbd638c2401149dec47159b2585c798a8d6f1e5e"}
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.614971 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wsh9l"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.642710 4940 scope.go:117] "RemoveContainer" containerID="486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.649078 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r7lxz"]
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.659429 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r7lxz"]
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.667288 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wsh9l"]
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.672942 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wsh9l"]
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.687358 4940 scope.go:117] "RemoveContainer" containerID="0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.715499 4940 scope.go:117] "RemoveContainer" containerID="1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd"
Nov 26 07:20:42 crc kubenswrapper[4940]: E1126 07:20:42.716406 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd\": container with ID starting with 1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd not found: ID does not exist" containerID="1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.716459 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd"} err="failed to get container status \"1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd\": rpc error: code = NotFound desc = could not find container \"1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd\": container with ID starting with 1224a6c0bbfe5f451a6f0796ff74e5408ada2d039d00a293fff323d5a598dadd not found: ID does not exist"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.716491 4940 scope.go:117] "RemoveContainer" containerID="486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07"
Nov 26 07:20:42 crc kubenswrapper[4940]: E1126 07:20:42.716778 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07\": container with ID starting with 486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07 not found: ID does not exist" containerID="486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.716812 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07"} err="failed to get container status \"486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07\": rpc error: code = NotFound desc = could not find container \"486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07\": container with ID starting with 486f10c388c25893944d8d864145db7a79b3cd0ba509d810611cd2d502449d07 not found: ID does not exist"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.716839 4940 scope.go:117] "RemoveContainer" containerID="0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa"
Nov 26 07:20:42 crc kubenswrapper[4940]: E1126 07:20:42.717328 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa\": container with ID starting with 0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa not found: ID does not exist" containerID="0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.717360 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa"} err="failed to get container status \"0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa\": rpc error: code = NotFound desc = could not find container \"0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa\": container with ID starting with 0e3bdcb9e2f91481f2b5d4124d90065b0cc9eb36aa840895fa5dd517f90b8daa not found: ID does not exist"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.717379 4940 scope.go:117] "RemoveContainer" containerID="0e2f214b56a4574e98fcb309e09713a98d62f7a8f14acf0f28bde2d659fe7c0e"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.744548 4940 scope.go:117] "RemoveContainer" containerID="fba8b17b6f1c32b3e9e139b2c0b5cee95f680958fb2d593d6a049073c7081b01"
Nov 26 07:20:42 crc kubenswrapper[4940]: I1126 07:20:42.767181 4940 scope.go:117] "RemoveContainer" containerID="d02857e47d963cfda61f48de40c15ac3a6804d565e87dfc3dfb6f8345605a482"
Nov 26 07:20:43 crc kubenswrapper[4940]: I1126 07:20:43.201594 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" path="/var/lib/kubelet/pods/1ee6f9b2-a68f-4ff0-bc6c-48f12275e797/volumes"
Nov 26 07:20:43 crc kubenswrapper[4940]: I1126 07:20:43.202511 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" path="/var/lib/kubelet/pods/3c5158dd-e8a1-48fa-a742-32c73dafd95b/volumes"
Nov 26 07:20:43 crc kubenswrapper[4940]: I1126 07:20:43.203367 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" path="/var/lib/kubelet/pods/d9a480fc-1e8a-4f40-8add-a75b75641f4e/volumes"
Nov 26 07:20:43 crc kubenswrapper[4940]: I1126 07:20:43.205551 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" path="/var/lib/kubelet/pods/fdaa4f87-0d23-46ca-882d-eb01a4482290/volumes"
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.174970 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9xjr2"]
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.175513 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9xjr2" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="registry-server" containerID="cri-o://3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a" gracePeriod=2
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.370158 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clc2j"]
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.370393 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-clc2j" podUID="7e29fd36-80e8-4803-a438-6563640d769d" containerName="registry-server" containerID="cri-o://b261de9b496f56339ed26c834c5dfd9e79095cea67e27556569938928e8cb1df" gracePeriod=2
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.550939 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9xjr2"
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.585450 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dzxrb"]
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.585723 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dzxrb" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="registry-server" containerID="cri-o://104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d" gracePeriod=2
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.639409 4940 generic.go:334] "Generic (PLEG): container finished" podID="7e29fd36-80e8-4803-a438-6563640d769d" containerID="b261de9b496f56339ed26c834c5dfd9e79095cea67e27556569938928e8cb1df" exitCode=0
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.639474 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clc2j" event={"ID":"7e29fd36-80e8-4803-a438-6563640d769d","Type":"ContainerDied","Data":"b261de9b496f56339ed26c834c5dfd9e79095cea67e27556569938928e8cb1df"}
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.647344 4940 generic.go:334] "Generic (PLEG): container finished" podID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerID="3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a" exitCode=0
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.647380 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xjr2" event={"ID":"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627","Type":"ContainerDied","Data":"3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a"}
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.647422 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xjr2" event={"ID":"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627","Type":"ContainerDied","Data":"986ea95aa97a4f35f02d125cfcededfb3f8e2c80f307e74d055477090fb71a90"}
Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.647427 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9xjr2" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.647438 4940 scope.go:117] "RemoveContainer" containerID="3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.695765 4940 scope.go:117] "RemoveContainer" containerID="f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.727951 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47jjq\" (UniqueName: \"kubernetes.io/projected/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-kube-api-access-47jjq\") pod \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.728090 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-catalog-content\") pod \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.728188 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-utilities\") pod \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\" (UID: \"c6bdf3ca-78b6-4f21-ad0e-f51e77a09627\") " Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.731059 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-utilities" (OuterVolumeSpecName: "utilities") pod "c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" (UID: "c6bdf3ca-78b6-4f21-ad0e-f51e77a09627"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.734419 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-kube-api-access-47jjq" (OuterVolumeSpecName: "kube-api-access-47jjq") pod "c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" (UID: "c6bdf3ca-78b6-4f21-ad0e-f51e77a09627"). InnerVolumeSpecName "kube-api-access-47jjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.770198 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbwh9"] Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.770465 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dbwh9" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerName="registry-server" containerID="cri-o://ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e" gracePeriod=2 Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.784775 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.790376 4940 scope.go:117] "RemoveContainer" containerID="7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.818162 4940 scope.go:117] "RemoveContainer" containerID="3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a" Nov 26 07:20:44 crc kubenswrapper[4940]: E1126 07:20:44.819366 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a\": container with ID starting with 3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a not found: ID does not exist" containerID="3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.819414 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a"} err="failed to get container status \"3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a\": rpc error: code = NotFound desc = could not find container \"3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a\": container with ID starting with 3affc37011873a52036f60f38c3e54953a5db5ee71adb5504a4e872b28a1ea1a not found: ID does not exist" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.819438 4940 scope.go:117] "RemoveContainer" containerID="f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109" Nov 26 07:20:44 crc kubenswrapper[4940]: E1126 07:20:44.819745 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109\": container with ID starting with f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109 not found: ID does not exist" containerID="f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.819778 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109"} err="failed to get container status \"f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109\": rpc error: code = NotFound desc = could not find container \"f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109\": container with ID starting with f105b1692738ca078e7f7b717ced1c5a20d558df46631d0f2c6dccc2f0efa109 not found: ID does not exist" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.819801 4940 scope.go:117] "RemoveContainer" containerID="7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853" Nov 26 07:20:44 crc kubenswrapper[4940]: E1126 07:20:44.820163 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853\": container with ID starting with 7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853 not found: ID does not exist" containerID="7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.820189 4940 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853"} err="failed to get container status \"7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853\": rpc error: code = NotFound desc = could not find container \"7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853\": container with ID starting with 7eb0cbc39e0275a640bd82702f3576ea0c8c18fcaf8e4e88072e6da8197f7853 not found: ID does not exist" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.857546 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-catalog-content\") pod \"7e29fd36-80e8-4803-a438-6563640d769d\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.857801 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bx9nl\" (UniqueName: \"kubernetes.io/projected/7e29fd36-80e8-4803-a438-6563640d769d-kube-api-access-bx9nl\") pod \"7e29fd36-80e8-4803-a438-6563640d769d\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.857865 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-utilities\") pod \"7e29fd36-80e8-4803-a438-6563640d769d\" (UID: \"7e29fd36-80e8-4803-a438-6563640d769d\") " Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.858208 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.858223 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47jjq\" (UniqueName: \"kubernetes.io/projected/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-kube-api-access-47jjq\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.862138 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" (UID: "c6bdf3ca-78b6-4f21-ad0e-f51e77a09627"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.874241 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-utilities" (OuterVolumeSpecName: "utilities") pod "7e29fd36-80e8-4803-a438-6563640d769d" (UID: "7e29fd36-80e8-4803-a438-6563640d769d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.901646 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e29fd36-80e8-4803-a438-6563640d769d" (UID: "7e29fd36-80e8-4803-a438-6563640d769d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.907519 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e29fd36-80e8-4803-a438-6563640d769d-kube-api-access-bx9nl" (OuterVolumeSpecName: "kube-api-access-bx9nl") pod "7e29fd36-80e8-4803-a438-6563640d769d" (UID: "7e29fd36-80e8-4803-a438-6563640d769d"). InnerVolumeSpecName "kube-api-access-bx9nl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.964506 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.964544 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e29fd36-80e8-4803-a438-6563640d769d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.964560 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.964573 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bx9nl\" (UniqueName: \"kubernetes.io/projected/7e29fd36-80e8-4803-a438-6563640d769d-kube-api-access-bx9nl\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.968817 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-frx6v"] Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.969086 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-frx6v" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="registry-server" containerID="cri-o://4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c" gracePeriod=2 Nov 26 07:20:44 crc kubenswrapper[4940]: E1126 07:20:44.979944 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod199e1cfb_f5d2_4889_ba11_6ae0596f3dff.slice/crio-ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod199e1cfb_f5d2_4889_ba11_6ae0596f3dff.slice/crio-conmon-ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e.scope\": RecentStats: unable to find data in memory cache]" Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.987156 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9xjr2"] Nov 26 07:20:44 crc kubenswrapper[4940]: I1126 07:20:44.992916 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9xjr2"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.181897 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dzxrb" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.182417 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" path="/var/lib/kubelet/pods/c6bdf3ca-78b6-4f21-ad0e-f51e77a09627/volumes" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.184202 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-flxv9"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.184465 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-flxv9" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerName="registry-server" containerID="cri-o://ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da" gracePeriod=2 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.295459 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xd5bc\" (UniqueName: \"kubernetes.io/projected/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-kube-api-access-xd5bc\") pod \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.295661 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-utilities\") pod \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.295762 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-catalog-content\") pod \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\" (UID: \"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.299247 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-kube-api-access-xd5bc" (OuterVolumeSpecName: "kube-api-access-xd5bc") pod "3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" (UID: "3a5ddf3b-edb0-43af-ac3d-234e6ee82f91"). InnerVolumeSpecName "kube-api-access-xd5bc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.303129 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-utilities" (OuterVolumeSpecName: "utilities") pod "3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" (UID: "3a5ddf3b-edb0-43af-ac3d-234e6ee82f91"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.370588 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l2gwh"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.370824 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l2gwh" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerName="registry-server" containerID="cri-o://90615a282478c166895f4253b050436ec2d818b99f026886839941e78dbbd47d" gracePeriod=2 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.398098 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xd5bc\" (UniqueName: \"kubernetes.io/projected/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-kube-api-access-xd5bc\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.398129 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.408579 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.414400 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.418177 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" (UID: "3a5ddf3b-edb0-43af-ac3d-234e6ee82f91"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.498756 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-catalog-content\") pod \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.498880 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-catalog-content\") pod \"dfef8b09-5a70-4b1e-8287-763763cd8b99\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.498906 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jknzc\" (UniqueName: \"kubernetes.io/projected/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-kube-api-access-jknzc\") pod \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.498943 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-utilities\") pod \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\" (UID: \"199e1cfb-f5d2-4889-ba11-6ae0596f3dff\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.499075 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdx98\" (UniqueName: \"kubernetes.io/projected/dfef8b09-5a70-4b1e-8287-763763cd8b99-kube-api-access-vdx98\") pod \"dfef8b09-5a70-4b1e-8287-763763cd8b99\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.499096 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-utilities\") pod \"dfef8b09-5a70-4b1e-8287-763763cd8b99\" (UID: \"dfef8b09-5a70-4b1e-8287-763763cd8b99\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.499372 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.499960 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-utilities" (OuterVolumeSpecName: "utilities") pod "dfef8b09-5a70-4b1e-8287-763763cd8b99" (UID: "dfef8b09-5a70-4b1e-8287-763763cd8b99"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.500100 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-utilities" (OuterVolumeSpecName: "utilities") pod "199e1cfb-f5d2-4889-ba11-6ae0596f3dff" (UID: "199e1cfb-f5d2-4889-ba11-6ae0596f3dff"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.503244 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfef8b09-5a70-4b1e-8287-763763cd8b99-kube-api-access-vdx98" (OuterVolumeSpecName: "kube-api-access-vdx98") pod "dfef8b09-5a70-4b1e-8287-763763cd8b99" (UID: "dfef8b09-5a70-4b1e-8287-763763cd8b99"). InnerVolumeSpecName "kube-api-access-vdx98". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.503427 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-kube-api-access-jknzc" (OuterVolumeSpecName: "kube-api-access-jknzc") pod "199e1cfb-f5d2-4889-ba11-6ae0596f3dff" (UID: "199e1cfb-f5d2-4889-ba11-6ae0596f3dff"). InnerVolumeSpecName "kube-api-access-jknzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.522427 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "199e1cfb-f5d2-4889-ba11-6ae0596f3dff" (UID: "199e1cfb-f5d2-4889-ba11-6ae0596f3dff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.580837 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jnr7b"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.581251 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jnr7b" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerName="registry-server" containerID="cri-o://85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1" gracePeriod=2 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.588330 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dfef8b09-5a70-4b1e-8287-763763cd8b99" (UID: "dfef8b09-5a70-4b1e-8287-763763cd8b99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.590296 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-flxv9" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.601760 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.601787 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jknzc\" (UniqueName: \"kubernetes.io/projected/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-kube-api-access-jknzc\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.601799 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.601809 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdx98\" (UniqueName: \"kubernetes.io/projected/dfef8b09-5a70-4b1e-8287-763763cd8b99-kube-api-access-vdx98\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.601817 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfef8b09-5a70-4b1e-8287-763763cd8b99-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.601825 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/199e1cfb-f5d2-4889-ba11-6ae0596f3dff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.671462 4940 generic.go:334] "Generic (PLEG): container finished" podID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerID="ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e" exitCode=0 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.671565 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbwh9" event={"ID":"199e1cfb-f5d2-4889-ba11-6ae0596f3dff","Type":"ContainerDied","Data":"ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.671611 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dbwh9" event={"ID":"199e1cfb-f5d2-4889-ba11-6ae0596f3dff","Type":"ContainerDied","Data":"d0b58269ecdf5b8d0a29473db70732fa6e163f6d4aa678f49b93ad4359fdb8ea"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.671642 4940 scope.go:117] "RemoveContainer" containerID="ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.671840 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dbwh9" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.674990 4940 generic.go:334] "Generic (PLEG): container finished" podID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerID="4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c" exitCode=0 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.675188 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-frx6v" event={"ID":"dfef8b09-5a70-4b1e-8287-763763cd8b99","Type":"ContainerDied","Data":"4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.675223 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-frx6v" event={"ID":"dfef8b09-5a70-4b1e-8287-763763cd8b99","Type":"ContainerDied","Data":"16b7fddcba27ec8372fd3b85ae11bb18f25a98998e53a1c862ccbc7a974bc148"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.675306 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-frx6v" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.678520 4940 generic.go:334] "Generic (PLEG): container finished" podID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerID="90615a282478c166895f4253b050436ec2d818b99f026886839941e78dbbd47d" exitCode=0 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.678593 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2gwh" event={"ID":"0282d0f2-66d7-40e4-96ed-364f44b4b372","Type":"ContainerDied","Data":"90615a282478c166895f4253b050436ec2d818b99f026886839941e78dbbd47d"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.686102 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clc2j" event={"ID":"7e29fd36-80e8-4803-a438-6563640d769d","Type":"ContainerDied","Data":"2ab0b11c46095107e056f486752f4b9dca3211ff06bd62e4c666efc2e95b17ff"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.686127 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clc2j" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.689919 4940 generic.go:334] "Generic (PLEG): container finished" podID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerID="ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da" exitCode=0 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.689968 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-flxv9" event={"ID":"a5c11eb7-a10a-486d-a692-2f3196c2cdbd","Type":"ContainerDied","Data":"ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.689994 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-flxv9" event={"ID":"a5c11eb7-a10a-486d-a692-2f3196c2cdbd","Type":"ContainerDied","Data":"fb0f91ebc879f67e557532ad02e73d0744d9db82742d932c8872cefe569944d7"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.690069 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-flxv9" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.693490 4940 generic.go:334] "Generic (PLEG): container finished" podID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerID="104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d" exitCode=0 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.693549 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzxrb" event={"ID":"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91","Type":"ContainerDied","Data":"104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.693589 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dzxrb" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.693590 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dzxrb" event={"ID":"3a5ddf3b-edb0-43af-ac3d-234e6ee82f91","Type":"ContainerDied","Data":"dc60c9e48df89c83ddefde98c9a9a37ee9101f7d225977ccda712d67da363b0a"} Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.703244 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8l62\" (UniqueName: \"kubernetes.io/projected/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-kube-api-access-g8l62\") pod \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.703276 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-catalog-content\") pod \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.703408 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-utilities\") pod \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\" (UID: \"a5c11eb7-a10a-486d-a692-2f3196c2cdbd\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.704497 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-utilities" (OuterVolumeSpecName: "utilities") pod "a5c11eb7-a10a-486d-a692-2f3196c2cdbd" (UID: "a5c11eb7-a10a-486d-a692-2f3196c2cdbd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.706716 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-kube-api-access-g8l62" (OuterVolumeSpecName: "kube-api-access-g8l62") pod "a5c11eb7-a10a-486d-a692-2f3196c2cdbd" (UID: "a5c11eb7-a10a-486d-a692-2f3196c2cdbd"). InnerVolumeSpecName "kube-api-access-g8l62". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.707742 4940 scope.go:117] "RemoveContainer" containerID="ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.708502 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l2gwh" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.727341 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbwh9"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.739846 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5c11eb7-a10a-486d-a692-2f3196c2cdbd" (UID: "a5c11eb7-a10a-486d-a692-2f3196c2cdbd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.739919 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dbwh9"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.751574 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-frx6v"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.759251 4940 scope.go:117] "RemoveContainer" containerID="864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.761140 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-frx6v"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.777599 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clc2j"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.783337 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-clc2j"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.786601 4940 scope.go:117] "RemoveContainer" containerID="ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e" Nov 26 07:20:45 crc kubenswrapper[4940]: E1126 07:20:45.786992 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e\": container with ID starting with ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e not found: ID does not exist" containerID="ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.787020 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e"} err="failed to get container status \"ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e\": rpc error: code = NotFound desc = could not find container \"ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e\": container with ID starting with ed152a72d44f133d99c1cc56776dfe449acc288aa94395c2630000da445b366e not found: ID does not exist" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.787055 4940 scope.go:117] "RemoveContainer" containerID="ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700" Nov 26 07:20:45 crc kubenswrapper[4940]: E1126 07:20:45.787317 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700\": container with ID starting with ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700 not found: ID does not exist" 
containerID="ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.787335 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700"} err="failed to get container status \"ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700\": rpc error: code = NotFound desc = could not find container \"ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700\": container with ID starting with ac3a387934c7215c5efd6eb0ec195faaff05523f7e4ff30d5234bafc02e49700 not found: ID does not exist" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.787349 4940 scope.go:117] "RemoveContainer" containerID="864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f" Nov 26 07:20:45 crc kubenswrapper[4940]: E1126 07:20:45.787533 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f\": container with ID starting with 864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f not found: ID does not exist" containerID="864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.787551 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f"} err="failed to get container status \"864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f\": rpc error: code = NotFound desc = could not find container \"864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f\": container with ID starting with 864990bb9b3899b1c79aea3badce54619b8e00719852e1b56aa2741e14cdcd0f not found: ID does not exist" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.787563 4940 scope.go:117] "RemoveContainer" containerID="4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.789923 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dzxrb"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.795682 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5rdpw"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.795925 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5rdpw" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerName="registry-server" containerID="cri-o://57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d" gracePeriod=2 Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.801556 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dzxrb"] Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.804421 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbkkr\" (UniqueName: \"kubernetes.io/projected/0282d0f2-66d7-40e4-96ed-364f44b4b372-kube-api-access-gbkkr\") pod \"0282d0f2-66d7-40e4-96ed-364f44b4b372\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.804583 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-utilities\") pod \"0282d0f2-66d7-40e4-96ed-364f44b4b372\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.804609 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-catalog-content\") pod \"0282d0f2-66d7-40e4-96ed-364f44b4b372\" (UID: \"0282d0f2-66d7-40e4-96ed-364f44b4b372\") " Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.804858 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.804880 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8l62\" (UniqueName: \"kubernetes.io/projected/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-kube-api-access-g8l62\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.804890 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c11eb7-a10a-486d-a692-2f3196c2cdbd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.806358 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-utilities" (OuterVolumeSpecName: "utilities") pod "0282d0f2-66d7-40e4-96ed-364f44b4b372" (UID: "0282d0f2-66d7-40e4-96ed-364f44b4b372"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.807585 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0282d0f2-66d7-40e4-96ed-364f44b4b372-kube-api-access-gbkkr" (OuterVolumeSpecName: "kube-api-access-gbkkr") pod "0282d0f2-66d7-40e4-96ed-364f44b4b372" (UID: "0282d0f2-66d7-40e4-96ed-364f44b4b372"). InnerVolumeSpecName "kube-api-access-gbkkr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.811964 4940 scope.go:117] "RemoveContainer" containerID="4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.834129 4940 scope.go:117] "RemoveContainer" containerID="f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.864833 4940 scope.go:117] "RemoveContainer" containerID="4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c" Nov 26 07:20:45 crc kubenswrapper[4940]: E1126 07:20:45.865257 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c\": container with ID starting with 4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c not found: ID does not exist" containerID="4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.865297 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c"} err="failed to get container status \"4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c\": rpc error: code = NotFound desc = could not find container \"4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c\": container with ID starting with 4a313ee5693f4b9a6eb578a16337a2b3b2ed1e18e0ed5534af757a8eb96dd27c not found: ID does not exist" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.865325 4940 scope.go:117] "RemoveContainer" containerID="4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32" Nov 26 07:20:45 crc kubenswrapper[4940]: E1126 07:20:45.865593 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32\": container with ID starting with 4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32 not found: ID does not exist" containerID="4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.865619 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32"} err="failed to get container status \"4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32\": rpc error: code = NotFound desc = could not find container \"4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32\": container with ID starting with 4c97a316ffa4ad8b525c84ef954748a1e6af4e9079bcb61d6cb72bcd4d991d32 not found: ID does not exist" Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.865641 4940 scope.go:117] "RemoveContainer" containerID="f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc" Nov 26 07:20:45 crc kubenswrapper[4940]: E1126 07:20:45.865813 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc\": container with ID starting with f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc not found: ID does not exist" containerID="f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc" Nov 26 07:20:45 crc 
kubenswrapper[4940]: I1126 07:20:45.865831 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc"} err="failed to get container status \"f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc\": rpc error: code = NotFound desc = could not find container \"f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc\": container with ID starting with f9f9d419b6ff8331a59e446b8561b47ec6e0c61a3e21a51d5586beb77f1bdbfc not found: ID does not exist"
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.865843 4940 scope.go:117] "RemoveContainer" containerID="b261de9b496f56339ed26c834c5dfd9e79095cea67e27556569938928e8cb1df"
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.906966 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.906991 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbkkr\" (UniqueName: \"kubernetes.io/projected/0282d0f2-66d7-40e4-96ed-364f44b4b372-kube-api-access-gbkkr\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.908484 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0282d0f2-66d7-40e4-96ed-364f44b4b372" (UID: "0282d0f2-66d7-40e4-96ed-364f44b4b372"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.915484 4940 scope.go:117] "RemoveContainer" containerID="d2b035c28fe954cbeac2f5dcbcdc37fe18b75aaef1457f34ebfb76b1becbd2b4"
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.931231 4940 scope.go:117] "RemoveContainer" containerID="6a7efed5aa3d50cf1f1fa4d0eb24f7616ee7573ca72f94e47cdd645871c010ab"
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.949842 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jnr7b"
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.954547 4940 scope.go:117] "RemoveContainer" containerID="ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da"
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.979710 4940 scope.go:117] "RemoveContainer" containerID="bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82"
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.989327 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-85cvv"]
Nov 26 07:20:45 crc kubenswrapper[4940]: I1126 07:20:45.989630 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-85cvv" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerName="registry-server" containerID="cri-o://ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859" gracePeriod=2
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.007613 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-utilities\") pod \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.007669 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnfd8\" (UniqueName: \"kubernetes.io/projected/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-kube-api-access-nnfd8\") pod \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.007735 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-catalog-content\") pod \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\" (UID: \"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.008429 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0282d0f2-66d7-40e4-96ed-364f44b4b372-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.009079 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-utilities" (OuterVolumeSpecName: "utilities") pod "721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" (UID: "721dadd4-5c8a-4673-bcd6-d4d4ee5f1465"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.020847 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-kube-api-access-nnfd8" (OuterVolumeSpecName: "kube-api-access-nnfd8") pod "721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" (UID: "721dadd4-5c8a-4673-bcd6-d4d4ee5f1465"). InnerVolumeSpecName "kube-api-access-nnfd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.026713 4940 scope.go:117] "RemoveContainer" containerID="411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.035866 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-flxv9"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.036137 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" (UID: "721dadd4-5c8a-4673-bcd6-d4d4ee5f1465"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.040816 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-flxv9"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.086770 4940 scope.go:117] "RemoveContainer" containerID="ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.087392 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da\": container with ID starting with ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da not found: ID does not exist" containerID="ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.087431 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da"} err="failed to get container status \"ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da\": rpc error: code = NotFound desc = could not find container \"ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da\": container with ID starting with ee37ec33a3a2239fd4dfc2b0ae82d9b1456275ed826a1d1fb987b2b590b547da not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.087460 4940 scope.go:117] "RemoveContainer" containerID="bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.087941 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82\": container with ID starting with bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82 not found: ID does not exist" containerID="bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.087963 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82"} err="failed to get container status \"bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82\": rpc error: code = NotFound desc = could not find container \"bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82\": container with ID starting with bae05cab5c03833730df80eb8182c42b37a4bb89de279bf1b30db6a257102b82 not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.087975 4940 scope.go:117] "RemoveContainer" containerID="411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.088335 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf\": container with ID starting with 411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf not found: ID does not exist" containerID="411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.088377 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf"} err="failed to get container status \"411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf\": rpc error: code = NotFound desc = could not find container \"411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf\": container with ID starting with 411d21b8211feeb46913fa251fdd316337d61d5005285b02550b03046b128fcf not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.088404 4940 scope.go:117] "RemoveContainer" containerID="104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.109551 4940 scope.go:117] "RemoveContainer" containerID="e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.109888 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.109908 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnfd8\" (UniqueName: \"kubernetes.io/projected/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-kube-api-access-nnfd8\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.109917 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.128263 4940 scope.go:117] "RemoveContainer" containerID="d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.163381 4940 scope.go:117] "RemoveContainer" containerID="104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.164518 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d\": container with ID starting with 104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d not found: ID does not exist" containerID="104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.164552 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d"} err="failed to get container status \"104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d\": rpc error: code = NotFound desc = could not find container \"104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d\": container with ID starting with 104716f37199432e6240788268acdf3912f5e868a730d6f5f8307a58d7f36f5d not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.164575 4940 scope.go:117] "RemoveContainer" containerID="e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.166968 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0\": container with ID starting with e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0 not found: ID does not exist" containerID="e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.167019 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0"} err="failed to get container status \"e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0\": rpc error: code = NotFound desc = could not find container \"e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0\": container with ID starting with e895ecac4d3d395fd5ea20cc0a7426d6521adfb2f374a97cb70cab4607590af0 not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.167076 4940 scope.go:117] "RemoveContainer" containerID="d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.167679 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2\": container with ID starting with d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2 not found: ID does not exist" containerID="d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.167709 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2"} err="failed to get container status \"d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2\": rpc error: code = NotFound desc = could not find container \"d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2\": container with ID starting with d9d2f26d1d01f3444079756f1c3f711568f9fbb13c3394e0cdb84262c7dfc6f2 not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.180563 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ff2fz"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.180799 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ff2fz" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="registry-server" containerID="cri-o://a6ca4f624cacbb77ccc37f7837088108df3ba41ad1b65b85e1546f0e058805e5" gracePeriod=2
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.194713 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5rdpw"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.317340 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-catalog-content\") pod \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.317632 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-utilities\") pod \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.317757 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqwt4\" (UniqueName: \"kubernetes.io/projected/61dabfc2-b66f-4c50-b217-7c58f2fd4725-kube-api-access-kqwt4\") pod \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\" (UID: \"61dabfc2-b66f-4c50-b217-7c58f2fd4725\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.320541 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61dabfc2-b66f-4c50-b217-7c58f2fd4725-kube-api-access-kqwt4" (OuterVolumeSpecName: "kube-api-access-kqwt4") pod "61dabfc2-b66f-4c50-b217-7c58f2fd4725" (UID: "61dabfc2-b66f-4c50-b217-7c58f2fd4725"). InnerVolumeSpecName "kube-api-access-kqwt4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.321155 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-utilities" (OuterVolumeSpecName: "utilities") pod "61dabfc2-b66f-4c50-b217-7c58f2fd4725" (UID: "61dabfc2-b66f-4c50-b217-7c58f2fd4725"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.376369 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-974pq"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.376708 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-974pq" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerName="registry-server" containerID="cri-o://7bcd52de79ac31cee8828ed5e62930ffbb17fbe8e76eadb32e2ce022cb1b7c89" gracePeriod=2
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.410790 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "61dabfc2-b66f-4c50-b217-7c58f2fd4725" (UID: "61dabfc2-b66f-4c50-b217-7c58f2fd4725"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.419486 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqwt4\" (UniqueName: \"kubernetes.io/projected/61dabfc2-b66f-4c50-b217-7c58f2fd4725-kube-api-access-kqwt4\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.419512 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.419524 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61dabfc2-b66f-4c50-b217-7c58f2fd4725-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.443152 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-85cvv"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.520324 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9prf\" (UniqueName: \"kubernetes.io/projected/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-kube-api-access-r9prf\") pod \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.520402 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-catalog-content\") pod \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.520478 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-utilities\") pod \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\" (UID: \"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.521258 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-utilities" (OuterVolumeSpecName: "utilities") pod "b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" (UID: "b1bb9974-4ef0-4c60-8aa4-5834cd1cda88"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.524999 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-kube-api-access-r9prf" (OuterVolumeSpecName: "kube-api-access-r9prf") pod "b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" (UID: "b1bb9974-4ef0-4c60-8aa4-5834cd1cda88"). InnerVolumeSpecName "kube-api-access-r9prf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.542065 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" (UID: "b1bb9974-4ef0-4c60-8aa4-5834cd1cda88"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.580643 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8k8ql"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.580872 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8k8ql" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="registry-server" containerID="cri-o://185e2151996492234e9c312215a76f565d36f30a09480a2a4b1ebca474569d65" gracePeriod=2
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.621719 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9prf\" (UniqueName: \"kubernetes.io/projected/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-kube-api-access-r9prf\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.621749 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.621758 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.633926 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-operators-8k8ql" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="registry-server" probeResult="failure" output=""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.705901 4940 generic.go:334] "Generic (PLEG): container finished" podID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerID="185e2151996492234e9c312215a76f565d36f30a09480a2a4b1ebca474569d65" exitCode=0
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.705966 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8k8ql" event={"ID":"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7","Type":"ContainerDied","Data":"185e2151996492234e9c312215a76f565d36f30a09480a2a4b1ebca474569d65"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.708966 4940 generic.go:334] "Generic (PLEG): container finished" podID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerID="57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d" exitCode=0
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.709054 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rdpw" event={"ID":"61dabfc2-b66f-4c50-b217-7c58f2fd4725","Type":"ContainerDied","Data":"57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.709072 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rdpw" event={"ID":"61dabfc2-b66f-4c50-b217-7c58f2fd4725","Type":"ContainerDied","Data":"46b62722647437ddb9f87e660acae9213317ffd734adbd1a650986237f31a394"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.709090 4940 scope.go:117] "RemoveContainer" containerID="57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.709118 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5rdpw"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.719617 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l2gwh" event={"ID":"0282d0f2-66d7-40e4-96ed-364f44b4b372","Type":"ContainerDied","Data":"b42985e3b2ab8ca88f1191ffd6d04bdf0e9539a3762121ce60c5a5bcccdc4af4"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.719710 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l2gwh"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.726108 4940 generic.go:334] "Generic (PLEG): container finished" podID="03b477be-0073-4390-b87a-acdf508074ee" containerID="a6ca4f624cacbb77ccc37f7837088108df3ba41ad1b65b85e1546f0e058805e5" exitCode=0
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.726151 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ff2fz" event={"ID":"03b477be-0073-4390-b87a-acdf508074ee","Type":"ContainerDied","Data":"a6ca4f624cacbb77ccc37f7837088108df3ba41ad1b65b85e1546f0e058805e5"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.726195 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ff2fz" event={"ID":"03b477be-0073-4390-b87a-acdf508074ee","Type":"ContainerDied","Data":"706818166abd90aab80ea4a1761664df250f75305be8c7198c5cad8f0990840e"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.726255 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="706818166abd90aab80ea4a1761664df250f75305be8c7198c5cad8f0990840e"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.734252 4940 generic.go:334] "Generic (PLEG): container finished" podID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerID="7bcd52de79ac31cee8828ed5e62930ffbb17fbe8e76eadb32e2ce022cb1b7c89" exitCode=0
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.734310 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974pq" event={"ID":"6506e24e-d48f-4bd2-96b7-d32eb555dc79","Type":"ContainerDied","Data":"7bcd52de79ac31cee8828ed5e62930ffbb17fbe8e76eadb32e2ce022cb1b7c89"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.736824 4940 generic.go:334] "Generic (PLEG): container finished" podID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerID="85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1" exitCode=0
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.736927 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jnr7b" event={"ID":"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465","Type":"ContainerDied","Data":"85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.736954 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jnr7b" event={"ID":"721dadd4-5c8a-4673-bcd6-d4d4ee5f1465","Type":"ContainerDied","Data":"f1134c058a135b19545145e834b43e90ea560b5eaaa3b8ea728e30792c54f787"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.736934 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jnr7b"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.743338 4940 scope.go:117] "RemoveContainer" containerID="53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.758448 4940 generic.go:334] "Generic (PLEG): container finished" podID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerID="ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859" exitCode=0
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.758494 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85cvv" event={"ID":"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88","Type":"ContainerDied","Data":"ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.758524 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85cvv" event={"ID":"b1bb9974-4ef0-4c60-8aa4-5834cd1cda88","Type":"ContainerDied","Data":"604c18786bb1f74f3fd3d58a8a4d2f490e5bf16fa7c6d5631ec27337ef22deeb"}
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.758593 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-85cvv"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.768753 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ff2fz"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.777786 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5q4sh"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.777949 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5q4sh" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerName="registry-server" containerID="cri-o://d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703" gracePeriod=2
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.783577 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5rdpw"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.795928 4940 scope.go:117] "RemoveContainer" containerID="8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.806204 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5rdpw"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.824109 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dnbc\" (UniqueName: \"kubernetes.io/projected/03b477be-0073-4390-b87a-acdf508074ee-kube-api-access-4dnbc\") pod \"03b477be-0073-4390-b87a-acdf508074ee\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.824152 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-catalog-content\") pod \"03b477be-0073-4390-b87a-acdf508074ee\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.824254 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-utilities\") pod \"03b477be-0073-4390-b87a-acdf508074ee\" (UID: \"03b477be-0073-4390-b87a-acdf508074ee\") "
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.824736 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l2gwh"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.828248 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-utilities" (OuterVolumeSpecName: "utilities") pod "03b477be-0073-4390-b87a-acdf508074ee" (UID: "03b477be-0073-4390-b87a-acdf508074ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.829024 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03b477be-0073-4390-b87a-acdf508074ee-kube-api-access-4dnbc" (OuterVolumeSpecName: "kube-api-access-4dnbc") pod "03b477be-0073-4390-b87a-acdf508074ee" (UID: "03b477be-0073-4390-b87a-acdf508074ee"). InnerVolumeSpecName "kube-api-access-4dnbc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.841334 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-l2gwh"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.843139 4940 scope.go:117] "RemoveContainer" containerID="57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.843517 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d\": container with ID starting with 57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d not found: ID does not exist" containerID="57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.843549 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d"} err="failed to get container status \"57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d\": rpc error: code = NotFound desc = could not find container \"57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d\": container with ID starting with 57fc63598239034fdd43a0db94436b2875ee90017ec98d8eacdfad907239581d not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.843569 4940 scope.go:117] "RemoveContainer" containerID="53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.843797 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509\": container with ID starting with 53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509 not found: ID does not exist" containerID="53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.843838 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509"} err="failed to get container status \"53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509\": rpc error: code = NotFound desc = could not find container \"53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509\": container with ID starting with 53af27094047580a37ffd2ac8fb1c0ab5b13c9949376a24c5fa3ce24c114a509 not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.843864 4940 scope.go:117] "RemoveContainer" containerID="8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134"
Nov 26 07:20:46 crc kubenswrapper[4940]: E1126 07:20:46.844196 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134\": container with ID starting with 8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134 not found: ID does not exist" containerID="8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.844233 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134"} err="failed to get container status \"8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134\": rpc error: code = NotFound desc = could not find container \"8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134\": container with ID starting with 8b9dad9a346101cd3ab5941091e8383d9842d51baa34f02630d07eafed32a134 not found: ID does not exist"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.844260 4940 scope.go:117] "RemoveContainer" containerID="90615a282478c166895f4253b050436ec2d818b99f026886839941e78dbbd47d"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.853296 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jnr7b"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.860425 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jnr7b"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.866213 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-85cvv"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.877779 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-974pq"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.900466 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-85cvv"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.913418 4940 scope.go:117] "RemoveContainer" containerID="c39e90d5d20a72175594a7c79c1a9e865cc7995e36cd07f166dee18bfe13139c"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.916013 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "03b477be-0073-4390-b87a-acdf508074ee" (UID: "03b477be-0073-4390-b87a-acdf508074ee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.929119 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dnbc\" (UniqueName: \"kubernetes.io/projected/03b477be-0073-4390-b87a-acdf508074ee-kube-api-access-4dnbc\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.929176 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.929187 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03b477be-0073-4390-b87a-acdf508074ee-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.937829 4940 scope.go:117] "RemoveContainer" containerID="2a64b8015158c7053b5ca75f915f3e47c1d033fca0961c5449b634d0c85bfad9"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.982314 4940 scope.go:117] "RemoveContainer" containerID="85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1"
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.983149 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fh5xx"]
Nov 26 07:20:46 crc kubenswrapper[4940]: I1126 07:20:46.983368 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fh5xx" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerName="registry-server" containerID="cri-o://3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad" gracePeriod=2
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.004772 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8k8ql"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.008957 4940 scope.go:117] "RemoveContainer" containerID="5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.029604 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-catalog-content\") pod \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.029693 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-utilities\") pod \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.029782 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rqlb\" (UniqueName: \"kubernetes.io/projected/6506e24e-d48f-4bd2-96b7-d32eb555dc79-kube-api-access-2rqlb\") pod \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\" (UID: \"6506e24e-d48f-4bd2-96b7-d32eb555dc79\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.032386 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-utilities" (OuterVolumeSpecName: "utilities") pod "6506e24e-d48f-4bd2-96b7-d32eb555dc79" (UID: "6506e24e-d48f-4bd2-96b7-d32eb555dc79"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.032664 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6506e24e-d48f-4bd2-96b7-d32eb555dc79-kube-api-access-2rqlb" (OuterVolumeSpecName: "kube-api-access-2rqlb") pod "6506e24e-d48f-4bd2-96b7-d32eb555dc79" (UID: "6506e24e-d48f-4bd2-96b7-d32eb555dc79"). InnerVolumeSpecName "kube-api-access-2rqlb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.037717 4940 scope.go:117] "RemoveContainer" containerID="0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.047896 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6506e24e-d48f-4bd2-96b7-d32eb555dc79" (UID: "6506e24e-d48f-4bd2-96b7-d32eb555dc79"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.103775 4940 scope.go:117] "RemoveContainer" containerID="85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1"
Nov 26 07:20:47 crc kubenswrapper[4940]: E1126 07:20:47.104355 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1\": container with ID starting with 85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1 not found: ID does not exist" containerID="85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.104390 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1"} err="failed to get container status \"85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1\": rpc error: code = NotFound desc = could not find container \"85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1\": container with ID starting with 85be68314a9574720b694d940d89227c0c520ad9a7b737dcfa8e27333c409ea1 not found: ID does not exist"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.104415 4940 scope.go:117] "RemoveContainer" containerID="5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d"
Nov 26 07:20:47 crc kubenswrapper[4940]: E1126 07:20:47.104728 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d\": container with ID starting with 5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d not found: ID does not exist" containerID="5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.104746 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d"} err="failed to get container status \"5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d\": rpc error: code = NotFound desc = could not find container \"5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d\": container with ID starting with 5ccd915e0be4e6b8cc9be8ca85dd5245dd36c6d1ee4263d5c5d680c554f6237d not found: ID does not exist"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.104758 4940 scope.go:117] "RemoveContainer" containerID="0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea"
Nov 26 07:20:47 crc kubenswrapper[4940]: E1126 07:20:47.104999 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea\": container with ID starting with 0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea not found: ID does not exist" containerID="0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.105020 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea"} err="failed to get container status \"0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea\": rpc error: code = NotFound desc = could not find container \"0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea\": container with ID starting with 0d2c8b75a3dd8fb244b22de8999511a5d4644bb4d57c28f7717e21517279f5ea not found: ID does not exist"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.105031 4940 scope.go:117] "RemoveContainer" containerID="ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.121020 4940 scope.go:117] "RemoveContainer" containerID="cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.130615 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nc8l2\" (UniqueName: \"kubernetes.io/projected/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-kube-api-access-nc8l2\") pod \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.130699 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-utilities\") pod \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.130830 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-catalog-content\") pod \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\" (UID: \"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.131200 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rqlb\" (UniqueName: \"kubernetes.io/projected/6506e24e-d48f-4bd2-96b7-d32eb555dc79-kube-api-access-2rqlb\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.131216 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.131225 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6506e24e-d48f-4bd2-96b7-d32eb555dc79-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.132140 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-utilities" (OuterVolumeSpecName: "utilities") pod "f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" (UID: "f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.133600 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-kube-api-access-nc8l2" (OuterVolumeSpecName: "kube-api-access-nc8l2") pod "f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" (UID: "f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7"). InnerVolumeSpecName "kube-api-access-nc8l2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.143398 4940 scope.go:117] "RemoveContainer" containerID="d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.159609 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5q4sh"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.173229 4940 scope.go:117] "RemoveContainer" containerID="ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859"
Nov 26 07:20:47 crc kubenswrapper[4940]: E1126 07:20:47.183584 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859\": container with ID starting with ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859 not found: ID does not exist" containerID="ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.183638 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859"} err="failed to get container status \"ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859\": rpc error: code = NotFound desc = could not find container \"ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859\": container with ID starting with ab0cd8b100c8e914e83cfc58c57d51fc6e5b0b74b7393df11cebfe261cdc6859 not found: ID does not exist"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.183668 4940 scope.go:117] "RemoveContainer" containerID="cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646"
Nov 26 07:20:47 crc kubenswrapper[4940]: E1126 07:20:47.192284 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646\": container with ID starting with cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646 not found: ID does not exist" containerID="cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.192325 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646"} err="failed to get container status \"cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646\": rpc error: code = NotFound desc = could not find container \"cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646\": container with ID starting with cda6f3a4f5549eeb8256cf2dad99cdfdd47a878cc8e640a0683258aedbbba646 not found: ID does not exist"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.192372 4940 scope.go:117] "RemoveContainer" containerID="d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.200786 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" path="/var/lib/kubelet/pods/0282d0f2-66d7-40e4-96ed-364f44b4b372/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.201624 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" path="/var/lib/kubelet/pods/199e1cfb-f5d2-4889-ba11-6ae0596f3dff/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.202598 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" path="/var/lib/kubelet/pods/3a5ddf3b-edb0-43af-ac3d-234e6ee82f91/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.203719 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" path="/var/lib/kubelet/pods/61dabfc2-b66f-4c50-b217-7c58f2fd4725/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.204292 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" path="/var/lib/kubelet/pods/721dadd4-5c8a-4673-bcd6-d4d4ee5f1465/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.205333 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e29fd36-80e8-4803-a438-6563640d769d" path="/var/lib/kubelet/pods/7e29fd36-80e8-4803-a438-6563640d769d/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: E1126 07:20:47.205389 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9\": container with ID starting with d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9 not found: ID does not exist" containerID="d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.205448 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9"} err="failed to get container status \"d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9\": rpc error: code = NotFound desc = could not find container \"d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9\": container with ID starting with d972129efc84c69217de1abb8fe45e52b38ea3640cc763c6ea5fd783752300d9 not found: ID does not exist"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.206024 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" path="/var/lib/kubelet/pods/a5c11eb7-a10a-486d-a692-2f3196c2cdbd/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.206641 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" path="/var/lib/kubelet/pods/b1bb9974-4ef0-4c60-8aa4-5834cd1cda88/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.207640 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" path="/var/lib/kubelet/pods/dfef8b09-5a70-4b1e-8287-763763cd8b99/volumes"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.214597 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwmzp"]
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.214842 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fwmzp" podUID="333dad53-ee05-4006-a57b-80ee2c090144" containerName="registry-server" containerID="cri-o://997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045" gracePeriod=2
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.232886 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lb6hb\" (UniqueName: \"kubernetes.io/projected/569e0af8-c086-4193-b1f9-4764c62b0d80-kube-api-access-lb6hb\") pod \"569e0af8-c086-4193-b1f9-4764c62b0d80\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.232991 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-utilities\") pod \"569e0af8-c086-4193-b1f9-4764c62b0d80\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.233026 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-catalog-content\") pod \"569e0af8-c086-4193-b1f9-4764c62b0d80\" (UID: \"569e0af8-c086-4193-b1f9-4764c62b0d80\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.233429 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nc8l2\" (UniqueName: \"kubernetes.io/projected/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-kube-api-access-nc8l2\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.233465 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.234280 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-utilities" (OuterVolumeSpecName: "utilities") pod "569e0af8-c086-4193-b1f9-4764c62b0d80" (UID: "569e0af8-c086-4193-b1f9-4764c62b0d80"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.236540 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/569e0af8-c086-4193-b1f9-4764c62b0d80-kube-api-access-lb6hb" (OuterVolumeSpecName: "kube-api-access-lb6hb") pod "569e0af8-c086-4193-b1f9-4764c62b0d80" (UID: "569e0af8-c086-4193-b1f9-4764c62b0d80"). InnerVolumeSpecName "kube-api-access-lb6hb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.248727 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" (UID: "f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.262078 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "569e0af8-c086-4193-b1f9-4764c62b0d80" (UID: "569e0af8-c086-4193-b1f9-4764c62b0d80"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.334697 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lb6hb\" (UniqueName: \"kubernetes.io/projected/569e0af8-c086-4193-b1f9-4764c62b0d80-kube-api-access-lb6hb\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.335011 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.335021 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/569e0af8-c086-4193-b1f9-4764c62b0d80-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.335032 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.374134 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ltrj6"]
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.374408 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ltrj6" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="registry-server" containerID="cri-o://83a94c79915de2c5dc795c02d7a2a9667d05534015e70bb454e2d116a23d3d74" gracePeriod=2
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.418501 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fh5xx"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.539886 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gc4qp\" (UniqueName: \"kubernetes.io/projected/29459127-09a2-47c2-b6ca-3b76342e6e04-kube-api-access-gc4qp\") pod \"29459127-09a2-47c2-b6ca-3b76342e6e04\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.540021 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-utilities\") pod \"29459127-09a2-47c2-b6ca-3b76342e6e04\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.540057 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-catalog-content\") pod \"29459127-09a2-47c2-b6ca-3b76342e6e04\" (UID: \"29459127-09a2-47c2-b6ca-3b76342e6e04\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.540763 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-utilities" (OuterVolumeSpecName: "utilities") pod "29459127-09a2-47c2-b6ca-3b76342e6e04" (UID: "29459127-09a2-47c2-b6ca-3b76342e6e04"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.548691 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29459127-09a2-47c2-b6ca-3b76342e6e04-kube-api-access-gc4qp" (OuterVolumeSpecName: "kube-api-access-gc4qp") pod "29459127-09a2-47c2-b6ca-3b76342e6e04" (UID: "29459127-09a2-47c2-b6ca-3b76342e6e04"). InnerVolumeSpecName "kube-api-access-gc4qp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.569627 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5p6nh"]
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.569911 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5p6nh" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerName="registry-server" containerID="cri-o://550e970c6334a62d6813bdfcfbfbc0201455fb5ed8768607616cf3d321542e01" gracePeriod=2
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.640401 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29459127-09a2-47c2-b6ca-3b76342e6e04" (UID: "29459127-09a2-47c2-b6ca-3b76342e6e04"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.641333 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gc4qp\" (UniqueName: \"kubernetes.io/projected/29459127-09a2-47c2-b6ca-3b76342e6e04-kube-api-access-gc4qp\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.641354 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.641363 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29459127-09a2-47c2-b6ca-3b76342e6e04-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.684431 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fwmzp"
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.741933 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-catalog-content\") pod \"333dad53-ee05-4006-a57b-80ee2c090144\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.741984 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-utilities\") pod \"333dad53-ee05-4006-a57b-80ee2c090144\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.742020 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97nq4\" (UniqueName: \"kubernetes.io/projected/333dad53-ee05-4006-a57b-80ee2c090144-kube-api-access-97nq4\") pod \"333dad53-ee05-4006-a57b-80ee2c090144\" (UID: \"333dad53-ee05-4006-a57b-80ee2c090144\") "
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.742993 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-utilities" (OuterVolumeSpecName: "utilities") pod "333dad53-ee05-4006-a57b-80ee2c090144" (UID: "333dad53-ee05-4006-a57b-80ee2c090144"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.745365 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/333dad53-ee05-4006-a57b-80ee2c090144-kube-api-access-97nq4" (OuterVolumeSpecName: "kube-api-access-97nq4") pod "333dad53-ee05-4006-a57b-80ee2c090144" (UID: "333dad53-ee05-4006-a57b-80ee2c090144"). InnerVolumeSpecName "kube-api-access-97nq4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.759164 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "333dad53-ee05-4006-a57b-80ee2c090144" (UID: "333dad53-ee05-4006-a57b-80ee2c090144"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.772418 4940 generic.go:334] "Generic (PLEG): container finished" podID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerID="3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad" exitCode=0 Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.772487 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fh5xx" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.772513 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fh5xx" event={"ID":"29459127-09a2-47c2-b6ca-3b76342e6e04","Type":"ContainerDied","Data":"3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.772922 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5m2tc"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.772940 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fh5xx" event={"ID":"29459127-09a2-47c2-b6ca-3b76342e6e04","Type":"ContainerDied","Data":"1bcd9e2bea9ea492edfbee1bd6c57e7e277d41b232539f96fc1f1d16ca927ebf"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.772985 4940 scope.go:117] "RemoveContainer" containerID="3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.773407 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5m2tc" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="registry-server" containerID="cri-o://00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f" gracePeriod=2 Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.779859 4940 generic.go:334] "Generic (PLEG): container finished" podID="333dad53-ee05-4006-a57b-80ee2c090144" containerID="997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045" exitCode=0 Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.780076 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwmzp" event={"ID":"333dad53-ee05-4006-a57b-80ee2c090144","Type":"ContainerDied","Data":"997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.780105 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwmzp" event={"ID":"333dad53-ee05-4006-a57b-80ee2c090144","Type":"ContainerDied","Data":"a1358580ec112e5bcc249eed870d01fe1caa66da921d58339a98a6e81a1ae0f7"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.780166 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fwmzp" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.786797 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8k8ql" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.786810 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8k8ql" event={"ID":"f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7","Type":"ContainerDied","Data":"b499aae95657b4f8df445760cd2d14e1290be04dbaef61e68358578c675de4e8"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.800714 4940 generic.go:334] "Generic (PLEG): container finished" podID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerID="83a94c79915de2c5dc795c02d7a2a9667d05534015e70bb454e2d116a23d3d74" exitCode=0 Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.800807 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltrj6" event={"ID":"ebba4d6c-39d4-481b-acbf-92b6dab82439","Type":"ContainerDied","Data":"83a94c79915de2c5dc795c02d7a2a9667d05534015e70bb454e2d116a23d3d74"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.801748 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ltrj6" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.812014 4940 scope.go:117] "RemoveContainer" containerID="e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.814437 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-974pq" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.814437 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974pq" event={"ID":"6506e24e-d48f-4bd2-96b7-d32eb555dc79","Type":"ContainerDied","Data":"06bd9286d8d8a2d541669fd6d22963b019d093effce574f34dca6f0740c6766f"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.816679 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwmzp"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.824644 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwmzp"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.842465 4940 generic.go:334] "Generic (PLEG): container finished" podID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerID="550e970c6334a62d6813bdfcfbfbc0201455fb5ed8768607616cf3d321542e01" exitCode=0 Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.842546 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5p6nh" event={"ID":"647ebab8-d2f6-4cd8-ae64-9a822c756453","Type":"ContainerDied","Data":"550e970c6334a62d6813bdfcfbfbc0201455fb5ed8768607616cf3d321542e01"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.843285 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.843302 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/333dad53-ee05-4006-a57b-80ee2c090144-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.843314 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97nq4\" (UniqueName: 
\"kubernetes.io/projected/333dad53-ee05-4006-a57b-80ee2c090144-kube-api-access-97nq4\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.877300 4940 generic.go:334] "Generic (PLEG): container finished" podID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerID="d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703" exitCode=0 Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.877423 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ff2fz" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.878120 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5q4sh" event={"ID":"569e0af8-c086-4193-b1f9-4764c62b0d80","Type":"ContainerDied","Data":"d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.878170 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5q4sh" event={"ID":"569e0af8-c086-4193-b1f9-4764c62b0d80","Type":"ContainerDied","Data":"85ce9b8eb5c9c0a5a37d64492d47ed537ac50e9dda59a1203c5bd33aa19ca287"} Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.878182 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5q4sh" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.881155 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fh5xx"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.893937 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fh5xx"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.913309 4940 scope.go:117] "RemoveContainer" containerID="b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.944249 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8k8ql"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.944710 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vmlv\" (UniqueName: \"kubernetes.io/projected/ebba4d6c-39d4-481b-acbf-92b6dab82439-kube-api-access-5vmlv\") pod \"ebba4d6c-39d4-481b-acbf-92b6dab82439\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.944737 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-utilities\") pod \"ebba4d6c-39d4-481b-acbf-92b6dab82439\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.944866 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-catalog-content\") pod \"ebba4d6c-39d4-481b-acbf-92b6dab82439\" (UID: \"ebba4d6c-39d4-481b-acbf-92b6dab82439\") " Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.957213 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-utilities" (OuterVolumeSpecName: "utilities") pod "ebba4d6c-39d4-481b-acbf-92b6dab82439" (UID: "ebba4d6c-39d4-481b-acbf-92b6dab82439"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.964616 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8k8ql"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.965355 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebba4d6c-39d4-481b-acbf-92b6dab82439-kube-api-access-5vmlv" (OuterVolumeSpecName: "kube-api-access-5vmlv") pod "ebba4d6c-39d4-481b-acbf-92b6dab82439" (UID: "ebba4d6c-39d4-481b-acbf-92b6dab82439"). InnerVolumeSpecName "kube-api-access-5vmlv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.977025 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-974pq"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.987178 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-974pq"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.990236 4940 scope.go:117] "RemoveContainer" containerID="3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.993436 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb2nd"] Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.993725 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bb2nd" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerName="registry-server" containerID="cri-o://dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08" gracePeriod=2 Nov 26 07:20:47 crc kubenswrapper[4940]: E1126 07:20:47.994796 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad\": container with ID starting with 3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad not found: ID does not exist" containerID="3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.994890 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad"} err="failed to get container status \"3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad\": rpc error: code = NotFound desc = could not find container \"3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad\": container with ID starting with 3ad4c909d61c8cd7795832ff75262321470a5dd9a355f104a760953f9c6766ad not found: ID does not exist" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.995002 4940 scope.go:117] "RemoveContainer" containerID="e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997" Nov 26 07:20:47 crc kubenswrapper[4940]: E1126 07:20:47.998138 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997\": container with ID starting with e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997 not found: ID does not exist" containerID="e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.998175 4940 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997"} err="failed to get container status \"e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997\": rpc error: code = NotFound desc = could not find container \"e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997\": container with ID starting with e416982687e57d9dd27b3e7a6affc6b8ce0c0edbe3decab6eb5101b72c709997 not found: ID does not exist" Nov 26 07:20:47 crc kubenswrapper[4940]: I1126 07:20:47.998201 4940 scope.go:117] "RemoveContainer" containerID="b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.000966 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5q4sh"] Nov 26 07:20:48 crc kubenswrapper[4940]: E1126 07:20:48.013644 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3\": container with ID starting with b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3 not found: ID does not exist" containerID="b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.013696 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3"} err="failed to get container status \"b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3\": rpc error: code = NotFound desc = could not find container \"b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3\": container with ID starting with b5864759d0606e2cc590af7ff10b6146c9abb5e67d6fd9e9ca07bc64ce9369f3 not found: ID does not exist" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.013726 4940 scope.go:117] "RemoveContainer" containerID="997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.022370 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5q4sh"] Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.031697 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ff2fz"] Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.042708 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ff2fz"] Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.047976 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vmlv\" (UniqueName: \"kubernetes.io/projected/ebba4d6c-39d4-481b-acbf-92b6dab82439-kube-api-access-5vmlv\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.048023 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.052485 4940 scope.go:117] "RemoveContainer" containerID="f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.074767 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-catalog-content" 
(OuterVolumeSpecName: "catalog-content") pod "ebba4d6c-39d4-481b-acbf-92b6dab82439" (UID: "ebba4d6c-39d4-481b-acbf-92b6dab82439"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.090266 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.110412 4940 scope.go:117] "RemoveContainer" containerID="2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.134114 4940 scope.go:117] "RemoveContainer" containerID="997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045" Nov 26 07:20:48 crc kubenswrapper[4940]: E1126 07:20:48.135664 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045\": container with ID starting with 997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045 not found: ID does not exist" containerID="997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.135720 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045"} err="failed to get container status \"997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045\": rpc error: code = NotFound desc = could not find container \"997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045\": container with ID starting with 997d6251e3b0cab226e0876ec28aa7a18bf228012676dd354c6b83b8ec0e0045 not found: ID does not exist" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.135757 4940 scope.go:117] "RemoveContainer" containerID="f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a" Nov 26 07:20:48 crc kubenswrapper[4940]: E1126 07:20:48.136699 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a\": container with ID starting with f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a not found: ID does not exist" containerID="f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.136726 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a"} err="failed to get container status \"f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a\": rpc error: code = NotFound desc = could not find container \"f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a\": container with ID starting with f3a9ff19671ab84bca02053c53bf99c6be0a9bed09d06c801babfc6286f1908a not found: ID does not exist" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.136745 4940 scope.go:117] "RemoveContainer" containerID="2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299" Nov 26 07:20:48 crc kubenswrapper[4940]: E1126 07:20:48.138740 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299\": container with ID starting with 
2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299 not found: ID does not exist" containerID="2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.138769 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299"} err="failed to get container status \"2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299\": rpc error: code = NotFound desc = could not find container \"2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299\": container with ID starting with 2153da4e4a50e1a52572ddf71cd91b06fa5c2fe73574d2eb0c87d7b4cff62299 not found: ID does not exist" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.138791 4940 scope.go:117] "RemoveContainer" containerID="185e2151996492234e9c312215a76f565d36f30a09480a2a4b1ebca474569d65" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.150488 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-utilities\") pod \"647ebab8-d2f6-4cd8-ae64-9a822c756453\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.150578 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-catalog-content\") pod \"647ebab8-d2f6-4cd8-ae64-9a822c756453\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.151412 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-utilities" (OuterVolumeSpecName: "utilities") pod "647ebab8-d2f6-4cd8-ae64-9a822c756453" (UID: "647ebab8-d2f6-4cd8-ae64-9a822c756453"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.161164 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dk64\" (UniqueName: \"kubernetes.io/projected/647ebab8-d2f6-4cd8-ae64-9a822c756453-kube-api-access-4dk64\") pod \"647ebab8-d2f6-4cd8-ae64-9a822c756453\" (UID: \"647ebab8-d2f6-4cd8-ae64-9a822c756453\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.164462 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/647ebab8-d2f6-4cd8-ae64-9a822c756453-kube-api-access-4dk64" (OuterVolumeSpecName: "kube-api-access-4dk64") pod "647ebab8-d2f6-4cd8-ae64-9a822c756453" (UID: "647ebab8-d2f6-4cd8-ae64-9a822c756453"). InnerVolumeSpecName "kube-api-access-4dk64". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.165365 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebba4d6c-39d4-481b-acbf-92b6dab82439-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.165399 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.165412 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dk64\" (UniqueName: \"kubernetes.io/projected/647ebab8-d2f6-4cd8-ae64-9a822c756453-kube-api-access-4dk64\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.171368 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "647ebab8-d2f6-4cd8-ae64-9a822c756453" (UID: "647ebab8-d2f6-4cd8-ae64-9a822c756453"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.177299 4940 scope.go:117] "RemoveContainer" containerID="2ceb1c4ae25de0eb323af0bbb137361f5cb18503d8beead31ecccc44e49beb53" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.211602 4940 scope.go:117] "RemoveContainer" containerID="62795aee6275762280c8a0f5558e233b643d59357ca1c6d87df7b15875956069" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.214131 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.245575 4940 scope.go:117] "RemoveContainer" containerID="7bcd52de79ac31cee8828ed5e62930ffbb17fbe8e76eadb32e2ce022cb1b7c89" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.268466 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/647ebab8-d2f6-4cd8-ae64-9a822c756453-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.273658 4940 scope.go:117] "RemoveContainer" containerID="9d4ba8e428173cf5d2ffd87de437b788ab95dd285984ca179c5ffb25df5c4d24" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.312740 4940 scope.go:117] "RemoveContainer" containerID="2962b97c54e9693b5efd9036acfd1f4930ff2c80e9f10c3aeba8a34fdbbaaf3a" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.369587 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279wc\" (UniqueName: \"kubernetes.io/projected/6be8a53f-d253-4c4b-8e7c-87277566773d-kube-api-access-279wc\") pod \"6be8a53f-d253-4c4b-8e7c-87277566773d\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.369669 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-utilities\") pod \"6be8a53f-d253-4c4b-8e7c-87277566773d\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.369714 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-catalog-content\") pod \"6be8a53f-d253-4c4b-8e7c-87277566773d\" (UID: \"6be8a53f-d253-4c4b-8e7c-87277566773d\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.370977 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-utilities" (OuterVolumeSpecName: "utilities") pod "6be8a53f-d253-4c4b-8e7c-87277566773d" (UID: "6be8a53f-d253-4c4b-8e7c-87277566773d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.372720 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6be8a53f-d253-4c4b-8e7c-87277566773d-kube-api-access-279wc" (OuterVolumeSpecName: "kube-api-access-279wc") pod "6be8a53f-d253-4c4b-8e7c-87277566773d" (UID: "6be8a53f-d253-4c4b-8e7c-87277566773d"). InnerVolumeSpecName "kube-api-access-279wc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.423668 4940 scope.go:117] "RemoveContainer" containerID="d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.444410 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bb2nd" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.455053 4940 scope.go:117] "RemoveContainer" containerID="a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.470008 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6be8a53f-d253-4c4b-8e7c-87277566773d" (UID: "6be8a53f-d253-4c4b-8e7c-87277566773d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.470958 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.470988 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be8a53f-d253-4c4b-8e7c-87277566773d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.471102 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279wc\" (UniqueName: \"kubernetes.io/projected/6be8a53f-d253-4c4b-8e7c-87277566773d-kube-api-access-279wc\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.481854 4940 scope.go:117] "RemoveContainer" containerID="697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.500057 4940 scope.go:117] "RemoveContainer" containerID="d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703" Nov 26 07:20:48 crc kubenswrapper[4940]: E1126 07:20:48.500430 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703\": container with ID starting with d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703 not found: ID does not exist" containerID="d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.500472 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703"} err="failed to get container status \"d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703\": rpc error: code = NotFound desc = could not find container \"d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703\": container with ID starting with d3ee91b7c1b97076ba49a4dadc05139dcc8f529b8622bf1291fc9f38e6c3a703 not found: ID does not exist" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.500502 4940 scope.go:117] "RemoveContainer" containerID="a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098" Nov 26 07:20:48 crc kubenswrapper[4940]: E1126 07:20:48.500786 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098\": container with ID starting with a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098 not found: ID does not exist" 
containerID="a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.500817 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098"} err="failed to get container status \"a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098\": rpc error: code = NotFound desc = could not find container \"a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098\": container with ID starting with a4a8ade12e413f5eb02dc98aa15b0750f8d9af736470bc2bef619fc101a18098 not found: ID does not exist" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.500840 4940 scope.go:117] "RemoveContainer" containerID="697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a" Nov 26 07:20:48 crc kubenswrapper[4940]: E1126 07:20:48.501131 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a\": container with ID starting with 697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a not found: ID does not exist" containerID="697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.501157 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a"} err="failed to get container status \"697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a\": rpc error: code = NotFound desc = could not find container \"697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a\": container with ID starting with 697db6445d165467c7714c5cbbe01796072da79e72de633e47f55672cc44506a not found: ID does not exist" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.572131 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-utilities\") pod \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.572206 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-catalog-content\") pod \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.573140 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pst7r\" (UniqueName: \"kubernetes.io/projected/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-kube-api-access-pst7r\") pod \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\" (UID: \"f53b29f5-a2cb-45a2-84ea-71322b48ea8b\") " Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.573182 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-utilities" (OuterVolumeSpecName: "utilities") pod "f53b29f5-a2cb-45a2-84ea-71322b48ea8b" (UID: "f53b29f5-a2cb-45a2-84ea-71322b48ea8b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.573625 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.577432 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-kube-api-access-pst7r" (OuterVolumeSpecName: "kube-api-access-pst7r") pod "f53b29f5-a2cb-45a2-84ea-71322b48ea8b" (UID: "f53b29f5-a2cb-45a2-84ea-71322b48ea8b"). InnerVolumeSpecName "kube-api-access-pst7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.588363 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f53b29f5-a2cb-45a2-84ea-71322b48ea8b" (UID: "f53b29f5-a2cb-45a2-84ea-71322b48ea8b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.675110 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.675159 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pst7r\" (UniqueName: \"kubernetes.io/projected/f53b29f5-a2cb-45a2-84ea-71322b48ea8b-kube-api-access-pst7r\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.899710 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ltrj6" event={"ID":"ebba4d6c-39d4-481b-acbf-92b6dab82439","Type":"ContainerDied","Data":"f28564199b522356232fa69c0b397d068692d1507a79c70b43f924cf4ef62e2b"} Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.899743 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ltrj6" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.899798 4940 scope.go:117] "RemoveContainer" containerID="83a94c79915de2c5dc795c02d7a2a9667d05534015e70bb454e2d116a23d3d74" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.911192 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5p6nh" event={"ID":"647ebab8-d2f6-4cd8-ae64-9a822c756453","Type":"ContainerDied","Data":"967bd9eadfe7a2b1ab6baa521075371ad6481a362907be7e5721d3ce55d7e732"} Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.911312 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5p6nh" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.917277 4940 generic.go:334] "Generic (PLEG): container finished" podID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerID="dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08" exitCode=0 Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.917369 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb2nd" event={"ID":"f53b29f5-a2cb-45a2-84ea-71322b48ea8b","Type":"ContainerDied","Data":"dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08"} Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.917429 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bb2nd" event={"ID":"f53b29f5-a2cb-45a2-84ea-71322b48ea8b","Type":"ContainerDied","Data":"d8bd9da36b0c720bcd881749e96b54b31848c2cc46b72a9b9c487253e0726132"} Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.918382 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bb2nd" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.921425 4940 generic.go:334] "Generic (PLEG): container finished" podID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerID="00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f" exitCode=0 Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.921453 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m2tc" event={"ID":"6be8a53f-d253-4c4b-8e7c-87277566773d","Type":"ContainerDied","Data":"00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f"} Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.921477 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m2tc" event={"ID":"6be8a53f-d253-4c4b-8e7c-87277566773d","Type":"ContainerDied","Data":"05581b0e697c504b7c1893755032ef19aba4f2c48e20b4bbf889ee4ad8938120"} Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.921535 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5m2tc" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.949073 4940 scope.go:117] "RemoveContainer" containerID="8d2d4ca96287deb3ef1ff551058e5550dde9dbdb9e6d7b04c6f35d349c51a618" Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.961111 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ltrj6"] Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.977111 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ltrj6"] Nov 26 07:20:48 crc kubenswrapper[4940]: I1126 07:20:48.991125 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5p6nh"] Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.000649 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5p6nh"] Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.008429 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb2nd"] Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.015016 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bb2nd"] Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.016150 4940 scope.go:117] "RemoveContainer" containerID="f018787d2db1a2000b5b7ec2e40095408e768e5fed776e8bada55c6604b5b7de" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.021214 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5m2tc"] Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.026300 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5m2tc"] Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.042607 4940 scope.go:117] "RemoveContainer" containerID="550e970c6334a62d6813bdfcfbfbc0201455fb5ed8768607616cf3d321542e01" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.062232 4940 scope.go:117] "RemoveContainer" containerID="b4be4a2c51d29d4bdda835eb0bd45ac46d005ede37dc9ec4623836d77abeca48" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.085345 4940 scope.go:117] "RemoveContainer" containerID="0b4a38b88019c22d3505210d695c193d7f48242be69824926e6c048ceef474db" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.113160 4940 scope.go:117] "RemoveContainer" containerID="dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.129718 4940 scope.go:117] "RemoveContainer" containerID="2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.146710 4940 scope.go:117] "RemoveContainer" containerID="54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.162571 4940 scope.go:117] "RemoveContainer" containerID="dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08" Nov 26 07:20:49 crc kubenswrapper[4940]: E1126 07:20:49.163262 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08\": container with ID starting with dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08 not found: ID does not exist" containerID="dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08" Nov 26 07:20:49 crc 
kubenswrapper[4940]: I1126 07:20:49.163303 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08"} err="failed to get container status \"dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08\": rpc error: code = NotFound desc = could not find container \"dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08\": container with ID starting with dc754aff88f2d5811667a4f933d9a7db5b3636f416675637c008776669736a08 not found: ID does not exist" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.163330 4940 scope.go:117] "RemoveContainer" containerID="2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5" Nov 26 07:20:49 crc kubenswrapper[4940]: E1126 07:20:49.163658 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5\": container with ID starting with 2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5 not found: ID does not exist" containerID="2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.163700 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5"} err="failed to get container status \"2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5\": rpc error: code = NotFound desc = could not find container \"2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5\": container with ID starting with 2047afda6488a2745b4674804e26a118a02812ccc27582125fab56cde1ac53d5 not found: ID does not exist" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.163729 4940 scope.go:117] "RemoveContainer" containerID="54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f" Nov 26 07:20:49 crc kubenswrapper[4940]: E1126 07:20:49.163988 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f\": container with ID starting with 54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f not found: ID does not exist" containerID="54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.164029 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f"} err="failed to get container status \"54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f\": rpc error: code = NotFound desc = could not find container \"54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f\": container with ID starting with 54d9735f724b9ea2881041eaa1ae47f17afdce155066edd331adf702643aea8f not found: ID does not exist" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.164076 4940 scope.go:117] "RemoveContainer" containerID="00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.183233 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03b477be-0073-4390-b87a-acdf508074ee" path="/var/lib/kubelet/pods/03b477be-0073-4390-b87a-acdf508074ee/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.183838 4940 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" path="/var/lib/kubelet/pods/29459127-09a2-47c2-b6ca-3b76342e6e04/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.184470 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="333dad53-ee05-4006-a57b-80ee2c090144" path="/var/lib/kubelet/pods/333dad53-ee05-4006-a57b-80ee2c090144/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.185525 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" path="/var/lib/kubelet/pods/569e0af8-c086-4193-b1f9-4764c62b0d80/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.186175 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" path="/var/lib/kubelet/pods/647ebab8-d2f6-4cd8-ae64-9a822c756453/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.187176 4940 scope.go:117] "RemoveContainer" containerID="58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.187380 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" path="/var/lib/kubelet/pods/6506e24e-d48f-4bd2-96b7-d32eb555dc79/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.188077 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" path="/var/lib/kubelet/pods/6be8a53f-d253-4c4b-8e7c-87277566773d/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.188718 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" path="/var/lib/kubelet/pods/ebba4d6c-39d4-481b-acbf-92b6dab82439/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.189727 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" path="/var/lib/kubelet/pods/f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.190298 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" path="/var/lib/kubelet/pods/f53b29f5-a2cb-45a2-84ea-71322b48ea8b/volumes" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.205380 4940 scope.go:117] "RemoveContainer" containerID="348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.222924 4940 scope.go:117] "RemoveContainer" containerID="00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f" Nov 26 07:20:49 crc kubenswrapper[4940]: E1126 07:20:49.223661 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f\": container with ID starting with 00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f not found: ID does not exist" containerID="00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.223686 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f"} err="failed to get container status \"00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f\": rpc error: code = NotFound desc = 
could not find container \"00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f\": container with ID starting with 00054fb8b631229e5db4268edf804f24215d1bddc571e61ffd7682a7424dc20f not found: ID does not exist" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.223706 4940 scope.go:117] "RemoveContainer" containerID="58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a" Nov 26 07:20:49 crc kubenswrapper[4940]: E1126 07:20:49.224004 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a\": container with ID starting with 58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a not found: ID does not exist" containerID="58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.224027 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a"} err="failed to get container status \"58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a\": rpc error: code = NotFound desc = could not find container \"58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a\": container with ID starting with 58833945f7c71d07827c2bd47361de10e648131459066e121a7e0e12dc4f2f6a not found: ID does not exist" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.224060 4940 scope.go:117] "RemoveContainer" containerID="348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110" Nov 26 07:20:49 crc kubenswrapper[4940]: E1126 07:20:49.224631 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110\": container with ID starting with 348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110 not found: ID does not exist" containerID="348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110" Nov 26 07:20:49 crc kubenswrapper[4940]: I1126 07:20:49.224650 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110"} err="failed to get container status \"348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110\": rpc error: code = NotFound desc = could not find container \"348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110\": container with ID starting with 348bc13a85659df13f244dae89de705517cb7279d69ef3c6a30a5c2b54aac110 not found: ID does not exist" Nov 26 07:20:51 crc kubenswrapper[4940]: I1126 07:20:51.728204 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:20:51 crc kubenswrapper[4940]: I1126 07:20:51.728479 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:21:05 crc kubenswrapper[4940]: I1126 07:21:05.857577 4940 scope.go:117] "RemoveContainer" 
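
The failed ContainerStatus lookups above are the kubelet re-querying container IDs it has just removed: the CRI runtime answers with gRPC NotFound, which the kubelet then logs as "DeleteContainer returned error" even though the removal itself already succeeded. A minimal Go sketch of how such a reply is classified, assuming a plain gRPC status error; the isNotFound helper is illustrative, not the kubelet's actual code:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isNotFound reports whether err carries the gRPC NotFound code that appears
// above as `rpc error: code = NotFound ... ID does not exist`.
func isNotFound(err error) bool {
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.NotFound
}

func main() {
	// Simulate the runtime's reply seen in the log lines above.
	err := status.Error(codes.NotFound, "could not find container: ID does not exist")
	// Prints true: the container is already gone, so the delete is effectively complete.
	fmt.Println(isNotFound(err))
}
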
containerID="939ccd4afccd08df91420e1a45d12166e869acef7735945e6dcdc351f2148b88" Nov 26 07:21:05 crc kubenswrapper[4940]: I1126 07:21:05.900441 4940 scope.go:117] "RemoveContainer" containerID="2227bb45b35e6b1fcd5af6d0afba00e9a99c6d24596b85f6b4a5623ebda271e8" Nov 26 07:21:05 crc kubenswrapper[4940]: I1126 07:21:05.946475 4940 scope.go:117] "RemoveContainer" containerID="95344aa6ca07c1a8566037a544ee375fe3f57a2f83ff0864fdf7dc405181e926" Nov 26 07:21:05 crc kubenswrapper[4940]: I1126 07:21:05.968965 4940 scope.go:117] "RemoveContainer" containerID="cbd7374187aae825ed18d1f8c6f4abf7a3f9464fa401bca64846ddf14a6bf493" Nov 26 07:21:06 crc kubenswrapper[4940]: I1126 07:21:06.003928 4940 scope.go:117] "RemoveContainer" containerID="b50c6efb974830e763174531b560df630b1d175d6e6f53275c7e3695c999a78a" Nov 26 07:21:06 crc kubenswrapper[4940]: I1126 07:21:06.027083 4940 scope.go:117] "RemoveContainer" containerID="1b0b7a8153fa17b875b912905ca3eeecef250cea4ba30fadfa693ec0401d9e0a" Nov 26 07:21:21 crc kubenswrapper[4940]: I1126 07:21:21.728906 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:21:21 crc kubenswrapper[4940]: I1126 07:21:21.730249 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:21:51 crc kubenswrapper[4940]: I1126 07:21:51.728627 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:21:51 crc kubenswrapper[4940]: I1126 07:21:51.729462 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:21:51 crc kubenswrapper[4940]: I1126 07:21:51.729550 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:21:51 crc kubenswrapper[4940]: I1126 07:21:51.730638 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:21:51 crc kubenswrapper[4940]: I1126 07:21:51.730747 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" gracePeriod=600 Nov 26 07:21:51 crc kubenswrapper[4940]: E1126 
07:21:51.855308 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:21:52 crc kubenswrapper[4940]: I1126 07:21:52.602493 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" exitCode=0 Nov 26 07:21:52 crc kubenswrapper[4940]: I1126 07:21:52.602563 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c"} Nov 26 07:21:52 crc kubenswrapper[4940]: I1126 07:21:52.602615 4940 scope.go:117] "RemoveContainer" containerID="8474433f4fb6790caac5b18927cabd8ff680b641694a71892a6b78f7af1c5c17" Nov 26 07:21:52 crc kubenswrapper[4940]: I1126 07:21:52.603560 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:21:52 crc kubenswrapper[4940]: E1126 07:21:52.604227 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:22:04 crc kubenswrapper[4940]: I1126 07:22:04.165656 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:22:04 crc kubenswrapper[4940]: E1126 07:22:04.166390 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.487321 4940 scope.go:117] "RemoveContainer" containerID="c4639981f7e85fc58d57349ae26c0c6d76318e4de9c80c781725c8673d4d82b5" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.548837 4940 scope.go:117] "RemoveContainer" containerID="b6e1c4625e97355a0fe0b8a594ce2614bec2811d433a4b978c8ba509446fe28f" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.584957 4940 scope.go:117] "RemoveContainer" containerID="9a08f7c810165e49dfe9ccdca40dca17e1157ad42f3c6e09fe28969dda99fecd" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.607091 4940 scope.go:117] "RemoveContainer" containerID="d72cd9ac728cbf3b30106e379fabf798c41b42cb28d594cc7033ef0b760e6a02" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.638354 4940 scope.go:117] "RemoveContainer" containerID="f53d0795cc850bda7285285cc6b6026c59a493da41e84bd627074b51135359b5" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.667439 4940 scope.go:117] "RemoveContainer" 
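
The probe and kill entries above spell out the failure loop: the kubelet GETs http://127.0.0.1:8798/health every 30 seconds (failures at 07:20:51, 07:21:21, 07:21:51), declares the liveness probe failed, and kills the container with gracePeriod=600. A sketch of a corev1.Probe consistent with those observations, using the current k8s.io/api types where the handler field is named ProbeHandler; only the scheme, host, port, and path come from the log, while the period and threshold values are assumptions read off the timestamps:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/health",            // from the log
				Port: intstr.FromInt(8798), // from the log
				Host: "127.0.0.1",          // from the log
			},
		},
		PeriodSeconds:    30, // assumed: failures recur at 30s intervals above
		FailureThreshold: 3,  // assumed: typical default, not visible in this log
	}
	fmt.Printf("GET http://%s:%d%s\n",
		probe.HTTPGet.Host, probe.HTTPGet.Port.IntValue(), probe.HTTPGet.Path)
}
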
containerID="43a7762398dad13753c984843344a87476a120cf255f7b546b0b0863cc22a8be" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.685994 4940 scope.go:117] "RemoveContainer" containerID="702669136ea3ef57ecdc9f743c124a1da4686d32e188db2479f4eac18212047b" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.705497 4940 scope.go:117] "RemoveContainer" containerID="d498985319cb68f63f891777bc19a7745369f745cdaae2d1da4234f423bbc721" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.729541 4940 scope.go:117] "RemoveContainer" containerID="e5fa80963950fb43968ab06d391a280351e421fdee0cb75e947fe3346d50eda0" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.763122 4940 scope.go:117] "RemoveContainer" containerID="accbdd0a87a62dcaa8a1369d852c0e0f498d0fcb0a19728b7f616bf21d8918c5" Nov 26 07:22:06 crc kubenswrapper[4940]: I1126 07:22:06.780082 4940 scope.go:117] "RemoveContainer" containerID="b65703564f9318a7bf88f52111da348fe0e8f2f2747fbd28dcc03006797af0bc" Nov 26 07:22:18 crc kubenswrapper[4940]: I1126 07:22:18.165230 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:22:18 crc kubenswrapper[4940]: E1126 07:22:18.165958 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:22:31 crc kubenswrapper[4940]: I1126 07:22:31.166068 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:22:31 crc kubenswrapper[4940]: E1126 07:22:31.166963 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:22:43 crc kubenswrapper[4940]: I1126 07:22:43.165710 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:22:43 crc kubenswrapper[4940]: E1126 07:22:43.166929 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:22:56 crc kubenswrapper[4940]: I1126 07:22:56.165804 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:22:56 crc kubenswrapper[4940]: E1126 07:22:56.166382 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:23:06 crc kubenswrapper[4940]: I1126 07:23:06.896702 4940 scope.go:117] "RemoveContainer" containerID="8d83826e05c1cc03071fc239f8b2b62ced78d755990da036c83dd3d12b2e4231" Nov 26 07:23:06 crc kubenswrapper[4940]: I1126 07:23:06.936696 4940 scope.go:117] "RemoveContainer" containerID="5ebd02f88b9688b1e58f4c5bef3d10671c9d5b3d57bbdf96a5f704fe945e3bba" Nov 26 07:23:06 crc kubenswrapper[4940]: I1126 07:23:06.983540 4940 scope.go:117] "RemoveContainer" containerID="72e7a0c82ce7a29c8be03ae3e4dc6deec9e621f14440cc43e9a2dd27ad33bac0" Nov 26 07:23:07 crc kubenswrapper[4940]: I1126 07:23:07.006860 4940 scope.go:117] "RemoveContainer" containerID="e0c636b12aa08795b31b27a1d917b15d8b732238083a8564a42737a92c658225" Nov 26 07:23:07 crc kubenswrapper[4940]: I1126 07:23:07.041526 4940 scope.go:117] "RemoveContainer" containerID="df777f9d08415a40f6552e65002da2faf05b83dc3817620a46e85b24ddaab1a1" Nov 26 07:23:09 crc kubenswrapper[4940]: I1126 07:23:09.174086 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:23:09 crc kubenswrapper[4940]: E1126 07:23:09.174681 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:23:21 crc kubenswrapper[4940]: I1126 07:23:21.166028 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:23:21 crc kubenswrapper[4940]: E1126 07:23:21.167452 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:23:33 crc kubenswrapper[4940]: I1126 07:23:33.166849 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:23:33 crc kubenswrapper[4940]: E1126 07:23:33.169659 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:23:46 crc kubenswrapper[4940]: I1126 07:23:46.165277 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:23:46 crc kubenswrapper[4940]: E1126 07:23:46.165905 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:23:59 crc kubenswrapper[4940]: I1126 07:23:59.192351 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:23:59 crc kubenswrapper[4940]: E1126 07:23:59.193658 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:24:07 crc kubenswrapper[4940]: I1126 07:24:07.155260 4940 scope.go:117] "RemoveContainer" containerID="0e5f1295bc9bd05f924d7ad125718c5325b29da946b426629ce0de7b927b2d74" Nov 26 07:24:07 crc kubenswrapper[4940]: I1126 07:24:07.196452 4940 scope.go:117] "RemoveContainer" containerID="53d8c6b6abfceb5f620ed0f9437eb514b359bef4a1edb590086b6b445c8b72bf" Nov 26 07:24:07 crc kubenswrapper[4940]: I1126 07:24:07.243425 4940 scope.go:117] "RemoveContainer" containerID="8e24898a0ff112e87bf65f7928b22b6ce54972a826d301fced34470797356e71" Nov 26 07:24:07 crc kubenswrapper[4940]: I1126 07:24:07.274518 4940 scope.go:117] "RemoveContainer" containerID="ff7cc02587d72ccd72860ca1a6ae9d091cb7065bd1a5b449325803e602ba872d" Nov 26 07:24:07 crc kubenswrapper[4940]: I1126 07:24:07.302959 4940 scope.go:117] "RemoveContainer" containerID="f1b02b23cdc5b3262b580dd8d36232c1e597b070d2f207d82ecf8dc91ab182c1" Nov 26 07:24:12 crc kubenswrapper[4940]: I1126 07:24:12.166445 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:24:12 crc kubenswrapper[4940]: E1126 07:24:12.167581 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:24:25 crc kubenswrapper[4940]: I1126 07:24:25.165979 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:24:25 crc kubenswrapper[4940]: E1126 07:24:25.166751 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:24:38 crc kubenswrapper[4940]: I1126 07:24:38.166074 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:24:38 crc kubenswrapper[4940]: E1126 07:24:38.166884 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:24:49 crc kubenswrapper[4940]: I1126 07:24:49.169554 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:24:49 crc kubenswrapper[4940]: E1126 07:24:49.170205 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:25:02 crc kubenswrapper[4940]: I1126 07:25:02.165998 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:25:02 crc kubenswrapper[4940]: E1126 07:25:02.166598 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:25:13 crc kubenswrapper[4940]: I1126 07:25:13.166702 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:25:13 crc kubenswrapper[4940]: E1126 07:25:13.167488 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:25:27 crc kubenswrapper[4940]: I1126 07:25:27.165846 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:25:27 crc kubenswrapper[4940]: E1126 07:25:27.166492 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:25:40 crc kubenswrapper[4940]: I1126 07:25:40.165184 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:25:40 crc kubenswrapper[4940]: E1126 07:25:40.177140 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" 
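
From the ContainerDied at 07:21:52 onward, the sync loop re-proposes a restart every 12-15 seconds and pod_workers rejects each attempt with "back-off 5m0s restarting failed ...", until the back-off window expires and the container finally starts again at 07:26:52, five minutes after it died. A sketch of the capped exponential back-off this implies; the 10s seed and doubling factor are the kubelet's documented defaults and are assumed here, while the 5m cap is exactly what the messages report:

package main

import (
	"fmt"
	"time"
)

func main() {
	backoff := 10 * time.Second        // assumed kubelet default seed
	const maxBackoff = 5 * time.Minute // the cap in the log: "back-off 5m0s"
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("restart attempt %d: wait %v\n", attempt, backoff)
		backoff *= 2 // assumed doubling factor
		if backoff > maxBackoff {
			backoff = maxBackoff // 10s, 20s, 40s, ... capped at 5m0s
		}
	}
}
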
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:25:54 crc kubenswrapper[4940]: I1126 07:25:54.166644 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:25:54 crc kubenswrapper[4940]: E1126 07:25:54.167671 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:26:07 crc kubenswrapper[4940]: I1126 07:26:07.428748 4940 scope.go:117] "RemoveContainer" containerID="a6ca4f624cacbb77ccc37f7837088108df3ba41ad1b65b85e1546f0e058805e5" Nov 26 07:26:07 crc kubenswrapper[4940]: I1126 07:26:07.461742 4940 scope.go:117] "RemoveContainer" containerID="d557c202fba44003b92a7e1f4a0e38539d5c7d532189c663f086068155b91ffd" Nov 26 07:26:07 crc kubenswrapper[4940]: I1126 07:26:07.503564 4940 scope.go:117] "RemoveContainer" containerID="abd64615531456f98ecfadbbd379a5f9887773d96dabb6f2ad129fb2e5febd23" Nov 26 07:26:09 crc kubenswrapper[4940]: I1126 07:26:09.172364 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:26:09 crc kubenswrapper[4940]: E1126 07:26:09.172967 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:26:24 crc kubenswrapper[4940]: I1126 07:26:24.165815 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:26:24 crc kubenswrapper[4940]: E1126 07:26:24.167023 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:26:37 crc kubenswrapper[4940]: I1126 07:26:37.165483 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:26:37 crc kubenswrapper[4940]: E1126 07:26:37.166290 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:26:52 crc kubenswrapper[4940]: I1126 07:26:52.166440 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:26:52 crc 
kubenswrapper[4940]: I1126 07:26:52.602276 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"3678b720dcf0b48a015b51f1c6a8e187694a275b8e4392b27b4d5c6b565b2e10"} Nov 26 07:29:21 crc kubenswrapper[4940]: I1126 07:29:21.728349 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:29:21 crc kubenswrapper[4940]: I1126 07:29:21.729195 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.324529 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ljxln"] Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325442 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325457 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325483 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="333dad53-ee05-4006-a57b-80ee2c090144" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325491 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="333dad53-ee05-4006-a57b-80ee2c090144" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325503 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325512 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325523 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325530 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325545 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e29fd36-80e8-4803-a438-6563640d769d" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325552 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e29fd36-80e8-4803-a438-6563640d769d" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325566 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="907d2835-1aee-4b5a-9726-a75946007030" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325573 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="907d2835-1aee-4b5a-9726-a75946007030" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325588 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325595 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325611 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325619 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325634 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325641 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325653 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325660 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325673 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325681 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325697 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="333dad53-ee05-4006-a57b-80ee2c090144" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325704 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="333dad53-ee05-4006-a57b-80ee2c090144" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325720 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325727 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325736 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325744 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325760 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325767 4940 
state_mem.go:107] "Deleted CPUSet assignment" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325778 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325787 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325796 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325804 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325818 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325826 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325833 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325840 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325852 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325859 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325873 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="907d2835-1aee-4b5a-9726-a75946007030" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325881 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="907d2835-1aee-4b5a-9726-a75946007030" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325896 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325904 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325912 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325919 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325930 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="registry-server" Nov 26 07:29:43 crc 
kubenswrapper[4940]: I1126 07:29:43.325937 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325952 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="333dad53-ee05-4006-a57b-80ee2c090144" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325960 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="333dad53-ee05-4006-a57b-80ee2c090144" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325974 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325982 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.325990 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.325997 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326007 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326015 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326029 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326056 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326065 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326072 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326087 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326094 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326105 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326113 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326130 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" 
containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326138 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326150 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326157 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326168 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326174 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326183 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326190 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326201 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326209 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326220 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326227 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326242 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326248 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326257 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326263 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326274 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326283 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326297 4940 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7e29fd36-80e8-4803-a438-6563640d769d" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326305 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e29fd36-80e8-4803-a438-6563640d769d" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326316 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326325 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326337 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326345 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326358 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326366 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326376 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326384 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326395 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326403 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326415 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326422 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326432 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326440 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326453 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326461 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326473 4940 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326480 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326494 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326501 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326513 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e29fd36-80e8-4803-a438-6563640d769d" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326521 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e29fd36-80e8-4803-a438-6563640d769d" containerName="registry-server" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326536 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326544 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326574 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="907d2835-1aee-4b5a-9726-a75946007030" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326583 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="907d2835-1aee-4b5a-9726-a75946007030" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326591 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326597 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326612 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326620 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerName="extract-content" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326632 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326642 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326651 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326659 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="extract-utilities" Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 
07:29:43.326671 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326679 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326691 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326698 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326709 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326716 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326727 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326734 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326745 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326753 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326762 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326770 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326784 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326793 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326808 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326816 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326829 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326838 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326850 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326858 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326871 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326879 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326892 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326900 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326914 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326922 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326936 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326944 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326952 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326960 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326973 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.326981 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.326992 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327000 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327012 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327019 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327051 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327063 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327077 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327086 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327097 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327105 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327115 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327123 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327131 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327138 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327148 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327155 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327163 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327171 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327180 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327188 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327198 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327206 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327216 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327224 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327233 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327240 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327253 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327261 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerName="extract-content"
Nov 26 07:29:43 crc kubenswrapper[4940]: E1126 07:29:43.327273 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327281 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerName="extract-utilities"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327478 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3b8d5ff-c6bd-48f2-9d6f-41394f79eff7" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327498 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c5158dd-e8a1-48fa-a742-32c73dafd95b" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327512 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a5ddf3b-edb0-43af-ac3d-234e6ee82f91" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327526 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="29459127-09a2-47c2-b6ca-3b76342e6e04" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327537 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="199e1cfb-f5d2-4889-ba11-6ae0596f3dff" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327549 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6be8a53f-d253-4c4b-8e7c-87277566773d" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327564 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfef8b09-5a70-4b1e-8287-763763cd8b99" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327576 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e29fd36-80e8-4803-a438-6563640d769d" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327590 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="721dadd4-5c8a-4673-bcd6-d4d4ee5f1465" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327605 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="907d2835-1aee-4b5a-9726-a75946007030" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327613 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="647ebab8-d2f6-4cd8-ae64-9a822c756453" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327628 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0282d0f2-66d7-40e4-96ed-364f44b4b372" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327641 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ee6f9b2-a68f-4ff0-bc6c-48f12275e797" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327653 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6506e24e-d48f-4bd2-96b7-d32eb555dc79" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327662 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebba4d6c-39d4-481b-acbf-92b6dab82439" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327675 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c6b3855-b23e-46a6-abfd-47c2ab2c4b5f" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327692 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="61dabfc2-b66f-4c50-b217-7c58f2fd4725" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327705 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6bdf3ca-78b6-4f21-ad0e-f51e77a09627" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327719 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="bab18c4f-0062-48f3-b320-3e503c35d5b2" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327727 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bcd6c0e-2ac6-4114-a0d2-6d63155d06a4" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327739 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="333dad53-ee05-4006-a57b-80ee2c090144" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327753 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="569e0af8-c086-4193-b1f9-4764c62b0d80" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327762 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdaa4f87-0d23-46ca-882d-eb01a4482290" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327771 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9a480fc-1e8a-4f40-8add-a75b75641f4e" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327779 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1743d256-8adb-4a95-a1d3-ec29d932191f" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327790 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f53b29f5-a2cb-45a2-84ea-71322b48ea8b" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327803 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1bb9974-4ef0-4c60-8aa4-5834cd1cda88" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327815 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eca03fd-c0fb-4900-a1de-1637a499e1cc" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327824 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="03b477be-0073-4390-b87a-acdf508074ee" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.327834 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5c11eb7-a10a-486d-a692-2f3196c2cdbd" containerName="registry-server"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.329427 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.336114 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ljxln"]
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.496358 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-utilities\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.496539 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-catalog-content\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.496578 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx4ds\" (UniqueName: \"kubernetes.io/projected/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-kube-api-access-hx4ds\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.597378 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-utilities\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.597457 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-catalog-content\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.597479 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx4ds\" (UniqueName: \"kubernetes.io/projected/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-kube-api-access-hx4ds\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.597923 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-utilities\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.597981 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-catalog-content\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.622882 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx4ds\" (UniqueName: \"kubernetes.io/projected/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-kube-api-access-hx4ds\") pod \"community-operators-ljxln\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") " pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:43 crc kubenswrapper[4940]: I1126 07:29:43.654617 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:44 crc kubenswrapper[4940]: I1126 07:29:44.133851 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ljxln"]
Nov 26 07:29:44 crc kubenswrapper[4940]: I1126 07:29:44.546534 4940 generic.go:334] "Generic (PLEG): container finished" podID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerID="ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019" exitCode=0
Nov 26 07:29:44 crc kubenswrapper[4940]: I1126 07:29:44.546574 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ljxln" event={"ID":"54f9f2f7-34da-4b35-853d-7bffa3c23ca2","Type":"ContainerDied","Data":"ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019"}
Nov 26 07:29:44 crc kubenswrapper[4940]: I1126 07:29:44.546595 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ljxln" event={"ID":"54f9f2f7-34da-4b35-853d-7bffa3c23ca2","Type":"ContainerStarted","Data":"d65f2e383a4965c981d60c9b71cbba3cb66d469b4f36709aa8e37c6c53a2b893"}
Nov 26 07:29:44 crc kubenswrapper[4940]: I1126 07:29:44.547983 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 07:29:45 crc kubenswrapper[4940]: I1126 07:29:45.559936 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ljxln" event={"ID":"54f9f2f7-34da-4b35-853d-7bffa3c23ca2","Type":"ContainerStarted","Data":"bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c"}
Nov 26 07:29:46 crc kubenswrapper[4940]: I1126 07:29:46.571544 4940 generic.go:334] "Generic (PLEG): container finished" podID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerID="bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c" exitCode=0
Nov 26 07:29:46 crc kubenswrapper[4940]: I1126 07:29:46.572242 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ljxln" event={"ID":"54f9f2f7-34da-4b35-853d-7bffa3c23ca2","Type":"ContainerDied","Data":"bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c"}
Nov 26 07:29:47 crc kubenswrapper[4940]: I1126 07:29:47.581450 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ljxln" event={"ID":"54f9f2f7-34da-4b35-853d-7bffa3c23ca2","Type":"ContainerStarted","Data":"6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486"}
Nov 26 07:29:47 crc kubenswrapper[4940]: I1126 07:29:47.598575 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ljxln" podStartSLOduration=2.193139106 podStartE2EDuration="4.598556716s" podCreationTimestamp="2025-11-26 07:29:43 +0000 UTC" firstStartedPulling="2025-11-26 07:29:44.547799929 +0000 UTC m=+2086.067941548" lastFinishedPulling="2025-11-26 07:29:46.953217539 +0000 UTC m=+2088.473359158" observedRunningTime="2025-11-26 07:29:47.597658389 +0000 UTC m=+2089.117800018" watchObservedRunningTime="2025-11-26 07:29:47.598556716 +0000 UTC m=+2089.118698355"
Nov 26 07:29:51 crc kubenswrapper[4940]: I1126 07:29:51.728091 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:29:51 crc kubenswrapper[4940]: I1126 07:29:51.728466 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:29:53 crc kubenswrapper[4940]: I1126 07:29:53.655130 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:53 crc kubenswrapper[4940]: I1126 07:29:53.655514 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:53 crc kubenswrapper[4940]: I1126 07:29:53.734820 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:54 crc kubenswrapper[4940]: I1126 07:29:54.730255 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:54 crc kubenswrapper[4940]: I1126 07:29:54.810286 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ljxln"]
Nov 26 07:29:56 crc kubenswrapper[4940]: I1126 07:29:56.665335 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ljxln" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerName="registry-server" containerID="cri-o://6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486" gracePeriod=2
Nov 26 07:29:56 crc kubenswrapper[4940]: E1126 07:29:56.823996 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54f9f2f7_34da_4b35_853d_7bffa3c23ca2.slice/crio-6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.165378 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.202129 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-catalog-content\") pod \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") "
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.202197 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-utilities\") pod \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") "
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.202237 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx4ds\" (UniqueName: \"kubernetes.io/projected/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-kube-api-access-hx4ds\") pod \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\" (UID: \"54f9f2f7-34da-4b35-853d-7bffa3c23ca2\") "
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.203198 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-utilities" (OuterVolumeSpecName: "utilities") pod "54f9f2f7-34da-4b35-853d-7bffa3c23ca2" (UID: "54f9f2f7-34da-4b35-853d-7bffa3c23ca2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.208651 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-kube-api-access-hx4ds" (OuterVolumeSpecName: "kube-api-access-hx4ds") pod "54f9f2f7-34da-4b35-853d-7bffa3c23ca2" (UID: "54f9f2f7-34da-4b35-853d-7bffa3c23ca2"). InnerVolumeSpecName "kube-api-access-hx4ds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.275645 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "54f9f2f7-34da-4b35-853d-7bffa3c23ca2" (UID: "54f9f2f7-34da-4b35-853d-7bffa3c23ca2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.304783 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.304838 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.304861 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx4ds\" (UniqueName: \"kubernetes.io/projected/54f9f2f7-34da-4b35-853d-7bffa3c23ca2-kube-api-access-hx4ds\") on node \"crc\" DevicePath \"\""
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.675880 4940 generic.go:334] "Generic (PLEG): container finished" podID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerID="6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486" exitCode=0
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.675944 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ljxln" event={"ID":"54f9f2f7-34da-4b35-853d-7bffa3c23ca2","Type":"ContainerDied","Data":"6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486"}
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.675993 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ljxln" event={"ID":"54f9f2f7-34da-4b35-853d-7bffa3c23ca2","Type":"ContainerDied","Data":"d65f2e383a4965c981d60c9b71cbba3cb66d469b4f36709aa8e37c6c53a2b893"}
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.676093 4940 scope.go:117] "RemoveContainer" containerID="6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.676210 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ljxln"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.711654 4940 scope.go:117] "RemoveContainer" containerID="bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.721215 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ljxln"]
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.730185 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ljxln"]
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.746908 4940 scope.go:117] "RemoveContainer" containerID="ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.790728 4940 scope.go:117] "RemoveContainer" containerID="6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486"
Nov 26 07:29:57 crc kubenswrapper[4940]: E1126 07:29:57.791501 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486\": container with ID starting with 6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486 not found: ID does not exist" containerID="6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.791551 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486"} err="failed to get container status \"6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486\": rpc error: code = NotFound desc = could not find container \"6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486\": container with ID starting with 6a02fd9dc8de57c07c0e9e0f7951371dde26e96b317ddce00b6055aef024b486 not found: ID does not exist"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.791583 4940 scope.go:117] "RemoveContainer" containerID="bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c"
Nov 26 07:29:57 crc kubenswrapper[4940]: E1126 07:29:57.792442 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c\": container with ID starting with bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c not found: ID does not exist" containerID="bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.792497 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c"} err="failed to get container status \"bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c\": rpc error: code = NotFound desc = could not find container \"bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c\": container with ID starting with bbf9f8454fbe07dfdcf424c59648bcf9c6835e11caf31fa5552730ccd7d9b98c not found: ID does not exist"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.792530 4940 scope.go:117] "RemoveContainer" containerID="ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019"
Nov 26 07:29:57 crc kubenswrapper[4940]: E1126 07:29:57.795196 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019\": container with ID starting with ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019 not found: ID does not exist" containerID="ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019"
Nov 26 07:29:57 crc kubenswrapper[4940]: I1126 07:29:57.795335 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019"} err="failed to get container status \"ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019\": rpc error: code = NotFound desc = could not find container \"ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019\": container with ID starting with ba3febae4412fb497b4d9a4bb150ac4bf127246cb17bb06584e4c78a61086019 not found: ID does not exist"
Nov 26 07:29:59 crc kubenswrapper[4940]: I1126 07:29:59.174499 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" path="/var/lib/kubelet/pods/54f9f2f7-34da-4b35-853d-7bffa3c23ca2/volumes"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.205325 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"]
Nov 26 07:30:00 crc kubenswrapper[4940]: E1126 07:30:00.206222 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerName="extract-content"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.206244 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerName="extract-content"
Nov 26 07:30:00 crc kubenswrapper[4940]: E1126 07:30:00.206280 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerName="registry-server"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.206293 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerName="registry-server"
Nov 26 07:30:00 crc kubenswrapper[4940]: E1126 07:30:00.206317 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerName="extract-utilities"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.206329 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerName="extract-utilities"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.206583 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="54f9f2f7-34da-4b35-853d-7bffa3c23ca2" containerName="registry-server"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.207291 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.210361 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.210581 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.214904 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"]
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.344376 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa5c8609-c89c-4674-9bae-9ef14ddca001-secret-volume\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.344454 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5dd9\" (UniqueName: \"kubernetes.io/projected/aa5c8609-c89c-4674-9bae-9ef14ddca001-kube-api-access-c5dd9\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.344813 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa5c8609-c89c-4674-9bae-9ef14ddca001-config-volume\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.445916 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa5c8609-c89c-4674-9bae-9ef14ddca001-config-volume\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.446021 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa5c8609-c89c-4674-9bae-9ef14ddca001-secret-volume\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.446050 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5dd9\" (UniqueName: \"kubernetes.io/projected/aa5c8609-c89c-4674-9bae-9ef14ddca001-kube-api-access-c5dd9\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.446674 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa5c8609-c89c-4674-9bae-9ef14ddca001-config-volume\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.459783 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa5c8609-c89c-4674-9bae-9ef14ddca001-secret-volume\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.467701 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5dd9\" (UniqueName: \"kubernetes.io/projected/aa5c8609-c89c-4674-9bae-9ef14ddca001-kube-api-access-c5dd9\") pod \"collect-profiles-29402370-vpbg9\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.482159 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zkh7s"]
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.483492 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.505351 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zkh7s"]
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.547066 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnppn\" (UniqueName: \"kubernetes.io/projected/c9615db6-c519-4f33-80fc-5e2635e09c19-kube-api-access-tnppn\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.547108 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-catalog-content\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.547344 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-utilities\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.582466 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.648284 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-utilities\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.648770 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-utilities\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.648804 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnppn\" (UniqueName: \"kubernetes.io/projected/c9615db6-c519-4f33-80fc-5e2635e09c19-kube-api-access-tnppn\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.648924 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-catalog-content\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.649202 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-catalog-content\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.676626 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnppn\" (UniqueName: \"kubernetes.io/projected/c9615db6-c519-4f33-80fc-5e2635e09c19-kube-api-access-tnppn\") pod \"redhat-marketplace-zkh7s\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:00 crc kubenswrapper[4940]: I1126 07:30:00.848428 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:01 crc kubenswrapper[4940]: I1126 07:30:01.017141 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"]
Nov 26 07:30:01 crc kubenswrapper[4940]: I1126 07:30:01.267265 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zkh7s"]
Nov 26 07:30:01 crc kubenswrapper[4940]: W1126 07:30:01.271755 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9615db6_c519_4f33_80fc_5e2635e09c19.slice/crio-7f5b48511771c6ad39c1a3023de6e8a07ddcc67160c4b3e2fbf8835eee069462 WatchSource:0}: Error finding container 7f5b48511771c6ad39c1a3023de6e8a07ddcc67160c4b3e2fbf8835eee069462: Status 404 returned error can't find the container with id 7f5b48511771c6ad39c1a3023de6e8a07ddcc67160c4b3e2fbf8835eee069462
Nov 26 07:30:01 crc kubenswrapper[4940]: I1126 07:30:01.712576 4940 generic.go:334] "Generic (PLEG): container finished" podID="aa5c8609-c89c-4674-9bae-9ef14ddca001" containerID="641c47ea18b8b04f5d88a4eec1f14583507dfc7d925eda34dd58d0f2353b1d31" exitCode=0
Nov 26 07:30:01 crc kubenswrapper[4940]: I1126 07:30:01.712934 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9" event={"ID":"aa5c8609-c89c-4674-9bae-9ef14ddca001","Type":"ContainerDied","Data":"641c47ea18b8b04f5d88a4eec1f14583507dfc7d925eda34dd58d0f2353b1d31"}
Nov 26 07:30:01 crc kubenswrapper[4940]: I1126 07:30:01.712974 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9" event={"ID":"aa5c8609-c89c-4674-9bae-9ef14ddca001","Type":"ContainerStarted","Data":"cc1be2263bea6dc92c77881d8aeeb5ff44475c6b584e2ad9f0ac5a0beafa3644"}
Nov 26 07:30:01 crc kubenswrapper[4940]: I1126 07:30:01.714663 4940 generic.go:334] "Generic (PLEG): container finished" podID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerID="287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2" exitCode=0
Nov 26 07:30:01 crc kubenswrapper[4940]: I1126 07:30:01.714709 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zkh7s" event={"ID":"c9615db6-c519-4f33-80fc-5e2635e09c19","Type":"ContainerDied","Data":"287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2"}
Nov 26 07:30:01 crc kubenswrapper[4940]: I1126 07:30:01.714732 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zkh7s" event={"ID":"c9615db6-c519-4f33-80fc-5e2635e09c19","Type":"ContainerStarted","Data":"7f5b48511771c6ad39c1a3023de6e8a07ddcc67160c4b3e2fbf8835eee069462"}
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.010487 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.182671 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa5c8609-c89c-4674-9bae-9ef14ddca001-secret-volume\") pod \"aa5c8609-c89c-4674-9bae-9ef14ddca001\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") "
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.183481 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa5c8609-c89c-4674-9bae-9ef14ddca001-config-volume\") pod \"aa5c8609-c89c-4674-9bae-9ef14ddca001\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") "
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.183608 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5dd9\" (UniqueName: \"kubernetes.io/projected/aa5c8609-c89c-4674-9bae-9ef14ddca001-kube-api-access-c5dd9\") pod \"aa5c8609-c89c-4674-9bae-9ef14ddca001\" (UID: \"aa5c8609-c89c-4674-9bae-9ef14ddca001\") "
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.184013 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa5c8609-c89c-4674-9bae-9ef14ddca001-config-volume" (OuterVolumeSpecName: "config-volume") pod "aa5c8609-c89c-4674-9bae-9ef14ddca001" (UID: "aa5c8609-c89c-4674-9bae-9ef14ddca001"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.184635 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa5c8609-c89c-4674-9bae-9ef14ddca001-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.188122 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa5c8609-c89c-4674-9bae-9ef14ddca001-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "aa5c8609-c89c-4674-9bae-9ef14ddca001" (UID: "aa5c8609-c89c-4674-9bae-9ef14ddca001"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.188465 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa5c8609-c89c-4674-9bae-9ef14ddca001-kube-api-access-c5dd9" (OuterVolumeSpecName: "kube-api-access-c5dd9") pod "aa5c8609-c89c-4674-9bae-9ef14ddca001" (UID: "aa5c8609-c89c-4674-9bae-9ef14ddca001"). InnerVolumeSpecName "kube-api-access-c5dd9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.285797 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5dd9\" (UniqueName: \"kubernetes.io/projected/aa5c8609-c89c-4674-9bae-9ef14ddca001-kube-api-access-c5dd9\") on node \"crc\" DevicePath \"\""
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.285840 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa5c8609-c89c-4674-9bae-9ef14ddca001-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.735158 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.735185 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9" event={"ID":"aa5c8609-c89c-4674-9bae-9ef14ddca001","Type":"ContainerDied","Data":"cc1be2263bea6dc92c77881d8aeeb5ff44475c6b584e2ad9f0ac5a0beafa3644"}
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.736130 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc1be2263bea6dc92c77881d8aeeb5ff44475c6b584e2ad9f0ac5a0beafa3644"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.738377 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zkh7s" event={"ID":"c9615db6-c519-4f33-80fc-5e2635e09c19","Type":"ContainerStarted","Data":"0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be"}
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.853781 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4q2mc"]
Nov 26 07:30:03 crc kubenswrapper[4940]: E1126 07:30:03.854392 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa5c8609-c89c-4674-9bae-9ef14ddca001" containerName="collect-profiles"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.854744 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa5c8609-c89c-4674-9bae-9ef14ddca001" containerName="collect-profiles"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.855008 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa5c8609-c89c-4674-9bae-9ef14ddca001" containerName="collect-profiles"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.857101 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.869538 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4q2mc"]
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.994973 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-utilities\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.995114 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-catalog-content\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:03 crc kubenswrapper[4940]: I1126 07:30:03.995172 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9ncv\" (UniqueName: \"kubernetes.io/projected/0e4f10cf-fd84-426f-95c0-1a06fd955651-kube-api-access-k9ncv\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.079013 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8"]
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.086790 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-xn5f8"]
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.096821 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9ncv\" (UniqueName: \"kubernetes.io/projected/0e4f10cf-fd84-426f-95c0-1a06fd955651-kube-api-access-k9ncv\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.096924 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-utilities\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.096969 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-catalog-content\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.097366 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-catalog-content\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.097569 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-utilities\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.128185 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9ncv\" (UniqueName: \"kubernetes.io/projected/0e4f10cf-fd84-426f-95c0-1a06fd955651-kube-api-access-k9ncv\") pod \"certified-operators-4q2mc\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.182772 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.613685 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4q2mc"]
Nov 26 07:30:04 crc kubenswrapper[4940]: W1126 07:30:04.617477 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e4f10cf_fd84_426f_95c0_1a06fd955651.slice/crio-a2c66719ceb003c7920e2f7c70d70e916135f6ebefaf1a2b98aa4ba16a41330d WatchSource:0}: Error finding container a2c66719ceb003c7920e2f7c70d70e916135f6ebefaf1a2b98aa4ba16a41330d: Status 404 returned error can't find the container with id a2c66719ceb003c7920e2f7c70d70e916135f6ebefaf1a2b98aa4ba16a41330d
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.750351 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4q2mc" event={"ID":"0e4f10cf-fd84-426f-95c0-1a06fd955651","Type":"ContainerStarted","Data":"a2c66719ceb003c7920e2f7c70d70e916135f6ebefaf1a2b98aa4ba16a41330d"}
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.752440 4940 generic.go:334] "Generic (PLEG): container finished" podID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerID="0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be" exitCode=0
Nov 26 07:30:04 crc kubenswrapper[4940]: I1126 07:30:04.752488 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zkh7s" event={"ID":"c9615db6-c519-4f33-80fc-5e2635e09c19","Type":"ContainerDied","Data":"0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be"}
Nov 26 07:30:05 crc kubenswrapper[4940]: I1126 07:30:05.181305 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="434b74d5-9895-4254-8d8a-17fec36577ab" path="/var/lib/kubelet/pods/434b74d5-9895-4254-8d8a-17fec36577ab/volumes"
Nov 26 07:30:05 crc kubenswrapper[4940]: I1126 07:30:05.759369 4940 generic.go:334] "Generic (PLEG): container finished" podID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerID="29fc60ed894e8f7f13da2b24a5d81243783ae99e3c44f0ae307501c867bd296a" exitCode=0
Nov 26 07:30:05 crc kubenswrapper[4940]: I1126 07:30:05.759438 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4q2mc" event={"ID":"0e4f10cf-fd84-426f-95c0-1a06fd955651","Type":"ContainerDied","Data":"29fc60ed894e8f7f13da2b24a5d81243783ae99e3c44f0ae307501c867bd296a"}
Nov 26 07:30:05 crc kubenswrapper[4940]: I1126 07:30:05.761924 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zkh7s" event={"ID":"c9615db6-c519-4f33-80fc-5e2635e09c19","Type":"ContainerStarted","Data":"107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3"}
Nov 26 07:30:05 crc kubenswrapper[4940]: I1126 07:30:05.809811 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zkh7s" podStartSLOduration=2.364915784 podStartE2EDuration="5.809794077s" podCreationTimestamp="2025-11-26 07:30:00 +0000 UTC" firstStartedPulling="2025-11-26 07:30:01.716063541 +0000 UTC m=+2103.236205180" lastFinishedPulling="2025-11-26 07:30:05.160941854 +0000 UTC m=+2106.681083473" observedRunningTime="2025-11-26 07:30:05.808324143 +0000 UTC m=+2107.328465822" watchObservedRunningTime="2025-11-26 07:30:05.809794077 +0000 UTC m=+2107.329935706"
Nov 26 07:30:06 crc kubenswrapper[4940]: I1126 07:30:06.770915 4940 generic.go:334] "Generic (PLEG): container finished" podID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerID="bbf8f06836ccba1dc846cb207a7970cdec1072c1c4b61cc40482a4a7ff4ed97f" exitCode=0
Nov 26 07:30:06 crc kubenswrapper[4940]: I1126 07:30:06.771023 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4q2mc" event={"ID":"0e4f10cf-fd84-426f-95c0-1a06fd955651","Type":"ContainerDied","Data":"bbf8f06836ccba1dc846cb207a7970cdec1072c1c4b61cc40482a4a7ff4ed97f"}
Nov 26 07:30:07 crc kubenswrapper[4940]: I1126 07:30:07.635880 4940 scope.go:117] "RemoveContainer" containerID="e9f2cd7af7f3000b1f019cf5a93e94f2ce5609871806d594839a3da030676025"
Nov 26 07:30:07 crc kubenswrapper[4940]: I1126 07:30:07.780283 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4q2mc" event={"ID":"0e4f10cf-fd84-426f-95c0-1a06fd955651","Type":"ContainerStarted","Data":"b8b10e107745736a8ce65be1c978d2d88aec94744d8bfe6dc66c17dc45679864"}
Nov 26 07:30:07 crc kubenswrapper[4940]: I1126 07:30:07.799417 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4q2mc" podStartSLOduration=3.3151543119999998 podStartE2EDuration="4.79939992s" podCreationTimestamp="2025-11-26 07:30:03 +0000 UTC" firstStartedPulling="2025-11-26 07:30:05.76057793 +0000 UTC m=+2107.280719549" lastFinishedPulling="2025-11-26 07:30:07.244823538 +0000 UTC m=+2108.764965157" observedRunningTime="2025-11-26 07:30:07.795182964 +0000 UTC m=+2109.315324583" watchObservedRunningTime="2025-11-26 07:30:07.79939992 +0000 UTC m=+2109.319541539"
Nov 26 07:30:10 crc kubenswrapper[4940]: I1126 07:30:10.848809 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:10 crc kubenswrapper[4940]: I1126 07:30:10.849145 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:10 crc kubenswrapper[4940]: I1126 07:30:10.923707 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.045086 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bzl7p"]
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.047112 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.062993 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bzl7p"]
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.208396 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vz48n\" (UniqueName: \"kubernetes.io/projected/4c1c05bf-442a-42b8-a90e-0df1b09386a9-kube-api-access-vz48n\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.208506 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-catalog-content\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.208550 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-utilities\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.309910 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vz48n\" (UniqueName: \"kubernetes.io/projected/4c1c05bf-442a-42b8-a90e-0df1b09386a9-kube-api-access-vz48n\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.310018 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-catalog-content\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.310155 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-utilities\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.310954 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-catalog-content\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.311146 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-utilities\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.333880 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vz48n\" (UniqueName: \"kubernetes.io/projected/4c1c05bf-442a-42b8-a90e-0df1b09386a9-kube-api-access-vz48n\") pod \"redhat-operators-bzl7p\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.370148 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzl7p"
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.808501 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bzl7p"]
Nov 26 07:30:11 crc kubenswrapper[4940]: W1126 07:30:11.809432 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c1c05bf_442a_42b8_a90e_0df1b09386a9.slice/crio-bf8c70859697ca097df267e45b68be59ae41113be2b9771c9ccb83aecbe1d810 WatchSource:0}: Error finding container bf8c70859697ca097df267e45b68be59ae41113be2b9771c9ccb83aecbe1d810: Status 404 returned error can't find the container with id bf8c70859697ca097df267e45b68be59ae41113be2b9771c9ccb83aecbe1d810
Nov 26 07:30:11 crc kubenswrapper[4940]: I1126 07:30:11.862520 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zkh7s"
Nov 26 07:30:12 crc kubenswrapper[4940]: I1126 07:30:12.817277 4940 generic.go:334] "Generic (PLEG): container finished" podID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerID="bf4e5f7c43e2795b104ed07bc709823a234802ad32f5ebdde2398bff6cc7a7da" exitCode=0
Nov 26 07:30:12 crc kubenswrapper[4940]: I1126 07:30:12.817385 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzl7p" event={"ID":"4c1c05bf-442a-42b8-a90e-0df1b09386a9","Type":"ContainerDied","Data":"bf4e5f7c43e2795b104ed07bc709823a234802ad32f5ebdde2398bff6cc7a7da"}
Nov 26 07:30:12 crc kubenswrapper[4940]: I1126 07:30:12.817447 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzl7p" event={"ID":"4c1c05bf-442a-42b8-a90e-0df1b09386a9","Type":"ContainerStarted","Data":"bf8c70859697ca097df267e45b68be59ae41113be2b9771c9ccb83aecbe1d810"}
Nov 26 07:30:13 crc kubenswrapper[4940]: I1126 07:30:13.239756 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zkh7s"]
Nov 26 07:30:13 crc kubenswrapper[4940]: I1126 07:30:13.826556 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzl7p" event={"ID":"4c1c05bf-442a-42b8-a90e-0df1b09386a9","Type":"ContainerStarted","Data":"9477accefca916f0518d648f9b71063cdf185cc164524de2485bbf74cb9df39e"}
Nov 26 07:30:13 crc kubenswrapper[4940]: I1126 07:30:13.826753 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zkh7s" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerName="registry-server" containerID="cri-o://107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3" gracePeriod=2
Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.183232 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.183275 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4q2mc"
Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.243312 4940
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4q2mc" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.252227 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zkh7s" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.353072 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnppn\" (UniqueName: \"kubernetes.io/projected/c9615db6-c519-4f33-80fc-5e2635e09c19-kube-api-access-tnppn\") pod \"c9615db6-c519-4f33-80fc-5e2635e09c19\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.353144 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-catalog-content\") pod \"c9615db6-c519-4f33-80fc-5e2635e09c19\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.353252 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-utilities\") pod \"c9615db6-c519-4f33-80fc-5e2635e09c19\" (UID: \"c9615db6-c519-4f33-80fc-5e2635e09c19\") " Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.354803 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-utilities" (OuterVolumeSpecName: "utilities") pod "c9615db6-c519-4f33-80fc-5e2635e09c19" (UID: "c9615db6-c519-4f33-80fc-5e2635e09c19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.359342 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9615db6-c519-4f33-80fc-5e2635e09c19-kube-api-access-tnppn" (OuterVolumeSpecName: "kube-api-access-tnppn") pod "c9615db6-c519-4f33-80fc-5e2635e09c19" (UID: "c9615db6-c519-4f33-80fc-5e2635e09c19"). InnerVolumeSpecName "kube-api-access-tnppn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.377104 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9615db6-c519-4f33-80fc-5e2635e09c19" (UID: "c9615db6-c519-4f33-80fc-5e2635e09c19"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.455265 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.455481 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnppn\" (UniqueName: \"kubernetes.io/projected/c9615db6-c519-4f33-80fc-5e2635e09c19-kube-api-access-tnppn\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.455492 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9615db6-c519-4f33-80fc-5e2635e09c19-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.838596 4940 generic.go:334] "Generic (PLEG): container finished" podID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerID="107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3" exitCode=0 Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.838646 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zkh7s" event={"ID":"c9615db6-c519-4f33-80fc-5e2635e09c19","Type":"ContainerDied","Data":"107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3"} Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.838725 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zkh7s" event={"ID":"c9615db6-c519-4f33-80fc-5e2635e09c19","Type":"ContainerDied","Data":"7f5b48511771c6ad39c1a3023de6e8a07ddcc67160c4b3e2fbf8835eee069462"} Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.838668 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zkh7s" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.838766 4940 scope.go:117] "RemoveContainer" containerID="107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.841611 4940 generic.go:334] "Generic (PLEG): container finished" podID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerID="9477accefca916f0518d648f9b71063cdf185cc164524de2485bbf74cb9df39e" exitCode=0 Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.844008 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzl7p" event={"ID":"4c1c05bf-442a-42b8-a90e-0df1b09386a9","Type":"ContainerDied","Data":"9477accefca916f0518d648f9b71063cdf185cc164524de2485bbf74cb9df39e"} Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.863561 4940 scope.go:117] "RemoveContainer" containerID="0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.899631 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zkh7s"] Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.905968 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zkh7s"] Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.910270 4940 scope.go:117] "RemoveContainer" containerID="287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.934315 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4q2mc" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.946047 4940 scope.go:117] "RemoveContainer" containerID="107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3" Nov 26 07:30:14 crc kubenswrapper[4940]: E1126 07:30:14.946573 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3\": container with ID starting with 107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3 not found: ID does not exist" containerID="107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.946635 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3"} err="failed to get container status \"107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3\": rpc error: code = NotFound desc = could not find container \"107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3\": container with ID starting with 107c2748027c38d917c22b7ebdade79f21be9a782018abe2907d882e09e315f3 not found: ID does not exist" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.946683 4940 scope.go:117] "RemoveContainer" containerID="0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be" Nov 26 07:30:14 crc kubenswrapper[4940]: E1126 07:30:14.947116 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be\": container with ID starting with 0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be not found: ID does not exist" 
containerID="0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.947155 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be"} err="failed to get container status \"0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be\": rpc error: code = NotFound desc = could not find container \"0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be\": container with ID starting with 0b00dabcb5b972a779bb0e4be3a9bb6c1d042718144b34d392ea1ef8707d88be not found: ID does not exist" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.947209 4940 scope.go:117] "RemoveContainer" containerID="287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2" Nov 26 07:30:14 crc kubenswrapper[4940]: E1126 07:30:14.948273 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2\": container with ID starting with 287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2 not found: ID does not exist" containerID="287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2" Nov 26 07:30:14 crc kubenswrapper[4940]: I1126 07:30:14.948326 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2"} err="failed to get container status \"287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2\": rpc error: code = NotFound desc = could not find container \"287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2\": container with ID starting with 287c4311beea61a828e34544c13d3e475a6d9168ad87bb6816ad228fe52c53d2 not found: ID does not exist" Nov 26 07:30:15 crc kubenswrapper[4940]: I1126 07:30:15.208305 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" path="/var/lib/kubelet/pods/c9615db6-c519-4f33-80fc-5e2635e09c19/volumes" Nov 26 07:30:15 crc kubenswrapper[4940]: I1126 07:30:15.855761 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzl7p" event={"ID":"4c1c05bf-442a-42b8-a90e-0df1b09386a9","Type":"ContainerStarted","Data":"4296863ccbdf5fce59ec774bc6c490b0b217de7899545eb051f5e3e8b85e8750"} Nov 26 07:30:17 crc kubenswrapper[4940]: I1126 07:30:17.633100 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bzl7p" podStartSLOduration=4.228449228 podStartE2EDuration="6.633068153s" podCreationTimestamp="2025-11-26 07:30:11 +0000 UTC" firstStartedPulling="2025-11-26 07:30:12.818867944 +0000 UTC m=+2114.339009563" lastFinishedPulling="2025-11-26 07:30:15.223486869 +0000 UTC m=+2116.743628488" observedRunningTime="2025-11-26 07:30:15.878740715 +0000 UTC m=+2117.398882374" watchObservedRunningTime="2025-11-26 07:30:17.633068153 +0000 UTC m=+2119.153209772" Nov 26 07:30:17 crc kubenswrapper[4940]: I1126 07:30:17.643554 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4q2mc"] Nov 26 07:30:17 crc kubenswrapper[4940]: I1126 07:30:17.644909 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4q2mc" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerName="registry-server" 
containerID="cri-o://b8b10e107745736a8ce65be1c978d2d88aec94744d8bfe6dc66c17dc45679864" gracePeriod=2 Nov 26 07:30:17 crc kubenswrapper[4940]: I1126 07:30:17.885869 4940 generic.go:334] "Generic (PLEG): container finished" podID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerID="b8b10e107745736a8ce65be1c978d2d88aec94744d8bfe6dc66c17dc45679864" exitCode=0 Nov 26 07:30:17 crc kubenswrapper[4940]: I1126 07:30:17.886213 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4q2mc" event={"ID":"0e4f10cf-fd84-426f-95c0-1a06fd955651","Type":"ContainerDied","Data":"b8b10e107745736a8ce65be1c978d2d88aec94744d8bfe6dc66c17dc45679864"} Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.022796 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4q2mc" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.127899 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-catalog-content\") pod \"0e4f10cf-fd84-426f-95c0-1a06fd955651\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.128214 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9ncv\" (UniqueName: \"kubernetes.io/projected/0e4f10cf-fd84-426f-95c0-1a06fd955651-kube-api-access-k9ncv\") pod \"0e4f10cf-fd84-426f-95c0-1a06fd955651\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.128294 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-utilities\") pod \"0e4f10cf-fd84-426f-95c0-1a06fd955651\" (UID: \"0e4f10cf-fd84-426f-95c0-1a06fd955651\") " Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.129199 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-utilities" (OuterVolumeSpecName: "utilities") pod "0e4f10cf-fd84-426f-95c0-1a06fd955651" (UID: "0e4f10cf-fd84-426f-95c0-1a06fd955651"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.137412 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e4f10cf-fd84-426f-95c0-1a06fd955651-kube-api-access-k9ncv" (OuterVolumeSpecName: "kube-api-access-k9ncv") pod "0e4f10cf-fd84-426f-95c0-1a06fd955651" (UID: "0e4f10cf-fd84-426f-95c0-1a06fd955651"). InnerVolumeSpecName "kube-api-access-k9ncv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.191938 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e4f10cf-fd84-426f-95c0-1a06fd955651" (UID: "0e4f10cf-fd84-426f-95c0-1a06fd955651"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.229957 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.229997 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9ncv\" (UniqueName: \"kubernetes.io/projected/0e4f10cf-fd84-426f-95c0-1a06fd955651-kube-api-access-k9ncv\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.230012 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e4f10cf-fd84-426f-95c0-1a06fd955651-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.899683 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4q2mc" event={"ID":"0e4f10cf-fd84-426f-95c0-1a06fd955651","Type":"ContainerDied","Data":"a2c66719ceb003c7920e2f7c70d70e916135f6ebefaf1a2b98aa4ba16a41330d"} Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.899760 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4q2mc" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.900067 4940 scope.go:117] "RemoveContainer" containerID="b8b10e107745736a8ce65be1c978d2d88aec94744d8bfe6dc66c17dc45679864" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.936326 4940 scope.go:117] "RemoveContainer" containerID="bbf8f06836ccba1dc846cb207a7970cdec1072c1c4b61cc40482a4a7ff4ed97f" Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.941601 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4q2mc"] Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.947650 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4q2mc"] Nov 26 07:30:18 crc kubenswrapper[4940]: I1126 07:30:18.959890 4940 scope.go:117] "RemoveContainer" containerID="29fc60ed894e8f7f13da2b24a5d81243783ae99e3c44f0ae307501c867bd296a" Nov 26 07:30:19 crc kubenswrapper[4940]: I1126 07:30:19.205824 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" path="/var/lib/kubelet/pods/0e4f10cf-fd84-426f-95c0-1a06fd955651/volumes" Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.371269 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bzl7p" Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.371398 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bzl7p" Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.448257 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bzl7p" Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.729028 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.729191 4940 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.729271 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.730309 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3678b720dcf0b48a015b51f1c6a8e187694a275b8e4392b27b4d5c6b565b2e10"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.730450 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://3678b720dcf0b48a015b51f1c6a8e187694a275b8e4392b27b4d5c6b565b2e10" gracePeriod=600 Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.931740 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="3678b720dcf0b48a015b51f1c6a8e187694a275b8e4392b27b4d5c6b565b2e10" exitCode=0 Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.931861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"3678b720dcf0b48a015b51f1c6a8e187694a275b8e4392b27b4d5c6b565b2e10"} Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.932073 4940 scope.go:117] "RemoveContainer" containerID="112d929d83dfa3729d734b650dea58b5d20256bb41add286b3307b8480cd7c5c" Nov 26 07:30:21 crc kubenswrapper[4940]: I1126 07:30:21.993391 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bzl7p" Nov 26 07:30:22 crc kubenswrapper[4940]: I1126 07:30:22.638390 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bzl7p"] Nov 26 07:30:22 crc kubenswrapper[4940]: I1126 07:30:22.947204 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0"} Nov 26 07:30:23 crc kubenswrapper[4940]: I1126 07:30:23.953011 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bzl7p" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerName="registry-server" containerID="cri-o://4296863ccbdf5fce59ec774bc6c490b0b217de7899545eb051f5e3e8b85e8750" gracePeriod=2 Nov 26 07:30:24 crc kubenswrapper[4940]: I1126 07:30:24.967414 4940 generic.go:334] "Generic (PLEG): container finished" podID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerID="4296863ccbdf5fce59ec774bc6c490b0b217de7899545eb051f5e3e8b85e8750" exitCode=0 Nov 26 07:30:24 crc kubenswrapper[4940]: I1126 07:30:24.967485 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-bzl7p" event={"ID":"4c1c05bf-442a-42b8-a90e-0df1b09386a9","Type":"ContainerDied","Data":"4296863ccbdf5fce59ec774bc6c490b0b217de7899545eb051f5e3e8b85e8750"} Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.580836 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzl7p" Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.748191 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vz48n\" (UniqueName: \"kubernetes.io/projected/4c1c05bf-442a-42b8-a90e-0df1b09386a9-kube-api-access-vz48n\") pod \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.748282 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-catalog-content\") pod \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.748373 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-utilities\") pod \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\" (UID: \"4c1c05bf-442a-42b8-a90e-0df1b09386a9\") " Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.749290 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-utilities" (OuterVolumeSpecName: "utilities") pod "4c1c05bf-442a-42b8-a90e-0df1b09386a9" (UID: "4c1c05bf-442a-42b8-a90e-0df1b09386a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.755279 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c1c05bf-442a-42b8-a90e-0df1b09386a9-kube-api-access-vz48n" (OuterVolumeSpecName: "kube-api-access-vz48n") pod "4c1c05bf-442a-42b8-a90e-0df1b09386a9" (UID: "4c1c05bf-442a-42b8-a90e-0df1b09386a9"). InnerVolumeSpecName "kube-api-access-vz48n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.848927 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c1c05bf-442a-42b8-a90e-0df1b09386a9" (UID: "4c1c05bf-442a-42b8-a90e-0df1b09386a9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.850087 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vz48n\" (UniqueName: \"kubernetes.io/projected/4c1c05bf-442a-42b8-a90e-0df1b09386a9-kube-api-access-vz48n\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.850173 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.850194 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1c05bf-442a-42b8-a90e-0df1b09386a9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.979473 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzl7p" event={"ID":"4c1c05bf-442a-42b8-a90e-0df1b09386a9","Type":"ContainerDied","Data":"bf8c70859697ca097df267e45b68be59ae41113be2b9771c9ccb83aecbe1d810"} Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.979625 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzl7p" Nov 26 07:30:25 crc kubenswrapper[4940]: I1126 07:30:25.979791 4940 scope.go:117] "RemoveContainer" containerID="4296863ccbdf5fce59ec774bc6c490b0b217de7899545eb051f5e3e8b85e8750" Nov 26 07:30:26 crc kubenswrapper[4940]: I1126 07:30:26.026314 4940 scope.go:117] "RemoveContainer" containerID="9477accefca916f0518d648f9b71063cdf185cc164524de2485bbf74cb9df39e" Nov 26 07:30:26 crc kubenswrapper[4940]: I1126 07:30:26.028353 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bzl7p"] Nov 26 07:30:26 crc kubenswrapper[4940]: I1126 07:30:26.038857 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bzl7p"] Nov 26 07:30:26 crc kubenswrapper[4940]: I1126 07:30:26.043145 4940 scope.go:117] "RemoveContainer" containerID="bf4e5f7c43e2795b104ed07bc709823a234802ad32f5ebdde2398bff6cc7a7da" Nov 26 07:30:27 crc kubenswrapper[4940]: I1126 07:30:27.175840 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" path="/var/lib/kubelet/pods/4c1c05bf-442a-42b8-a90e-0df1b09386a9/volumes" Nov 26 07:32:51 crc kubenswrapper[4940]: I1126 07:32:51.728669 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:32:51 crc kubenswrapper[4940]: I1126 07:32:51.729317 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:33:21 crc kubenswrapper[4940]: I1126 07:33:21.728118 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:33:21 crc kubenswrapper[4940]: I1126 07:33:21.728635 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:33:51 crc kubenswrapper[4940]: I1126 07:33:51.728004 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:33:51 crc kubenswrapper[4940]: I1126 07:33:51.728667 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:33:51 crc kubenswrapper[4940]: I1126 07:33:51.728731 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:33:51 crc kubenswrapper[4940]: I1126 07:33:51.729701 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:33:51 crc kubenswrapper[4940]: I1126 07:33:51.729796 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" gracePeriod=600 Nov 26 07:33:51 crc kubenswrapper[4940]: E1126 07:33:51.864935 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:33:51 crc kubenswrapper[4940]: E1126 07:33:51.885485 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1ad7e56d_d7f6_421a_ba5a_d2d8d5b6f6fd.slice/crio-conmon-a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0.scope\": RecentStats: unable to find data in memory cache]" Nov 26 07:33:52 crc kubenswrapper[4940]: I1126 07:33:52.115883 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" exitCode=0 Nov 26 07:33:52 crc kubenswrapper[4940]: I1126 07:33:52.115959 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0"} Nov 26 07:33:52 crc kubenswrapper[4940]: I1126 07:33:52.116033 4940 scope.go:117] "RemoveContainer" containerID="3678b720dcf0b48a015b51f1c6a8e187694a275b8e4392b27b4d5c6b565b2e10" Nov 26 07:33:52 crc kubenswrapper[4940]: I1126 07:33:52.116736 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:33:52 crc kubenswrapper[4940]: E1126 07:33:52.117460 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:34:06 crc kubenswrapper[4940]: I1126 07:34:06.166083 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:34:06 crc kubenswrapper[4940]: E1126 07:34:06.166803 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:34:20 crc kubenswrapper[4940]: I1126 07:34:20.165262 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:34:20 crc kubenswrapper[4940]: E1126 07:34:20.166807 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:34:31 crc kubenswrapper[4940]: I1126 07:34:31.176730 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:34:31 crc kubenswrapper[4940]: E1126 07:34:31.178830 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:34:43 crc kubenswrapper[4940]: I1126 07:34:43.165806 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:34:43 crc kubenswrapper[4940]: E1126 07:34:43.168577 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:34:57 crc kubenswrapper[4940]: I1126 07:34:57.166025 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:34:57 crc kubenswrapper[4940]: E1126 07:34:57.166975 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:35:08 crc kubenswrapper[4940]: I1126 07:35:08.165167 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:35:08 crc kubenswrapper[4940]: E1126 07:35:08.166034 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:35:19 crc kubenswrapper[4940]: I1126 07:35:19.170219 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:35:19 crc kubenswrapper[4940]: E1126 07:35:19.171101 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:35:34 crc kubenswrapper[4940]: I1126 07:35:34.165854 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:35:34 crc kubenswrapper[4940]: E1126 07:35:34.166683 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:35:47 crc kubenswrapper[4940]: I1126 07:35:47.165448 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:35:47 crc kubenswrapper[4940]: E1126 07:35:47.166312 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:36:01 crc kubenswrapper[4940]: I1126 07:36:01.166131 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:36:01 crc kubenswrapper[4940]: E1126 07:36:01.166950 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:36:14 crc kubenswrapper[4940]: I1126 07:36:14.165195 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:36:14 crc kubenswrapper[4940]: E1126 07:36:14.166205 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:36:26 crc kubenswrapper[4940]: I1126 07:36:26.165695 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:36:26 crc kubenswrapper[4940]: E1126 07:36:26.166770 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:36:39 crc kubenswrapper[4940]: I1126 07:36:39.172064 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:36:39 crc kubenswrapper[4940]: E1126 07:36:39.172876 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:36:50 crc kubenswrapper[4940]: I1126 07:36:50.165892 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:36:50 crc kubenswrapper[4940]: E1126 07:36:50.166684 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:37:01 crc kubenswrapper[4940]: I1126 07:37:01.165847 4940 scope.go:117] "RemoveContainer" 
containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:37:01 crc kubenswrapper[4940]: E1126 07:37:01.166775 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:37:13 crc kubenswrapper[4940]: I1126 07:37:13.165718 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:37:13 crc kubenswrapper[4940]: E1126 07:37:13.166840 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:37:27 crc kubenswrapper[4940]: I1126 07:37:27.165654 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:37:27 crc kubenswrapper[4940]: E1126 07:37:27.166437 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:37:42 crc kubenswrapper[4940]: I1126 07:37:42.166400 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:37:42 crc kubenswrapper[4940]: E1126 07:37:42.167499 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:37:54 crc kubenswrapper[4940]: I1126 07:37:54.164948 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:37:54 crc kubenswrapper[4940]: E1126 07:37:54.165546 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:38:09 crc kubenswrapper[4940]: I1126 07:38:09.166514 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:38:09 crc kubenswrapper[4940]: E1126 07:38:09.167805 4940 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:38:23 crc kubenswrapper[4940]: I1126 07:38:23.165205 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:38:23 crc kubenswrapper[4940]: E1126 07:38:23.166055 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:38:37 crc kubenswrapper[4940]: I1126 07:38:37.166031 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:38:37 crc kubenswrapper[4940]: E1126 07:38:37.167032 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:38:51 crc kubenswrapper[4940]: I1126 07:38:51.165891 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:38:51 crc kubenswrapper[4940]: E1126 07:38:51.166997 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:39:02 crc kubenswrapper[4940]: I1126 07:39:02.165771 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:39:03 crc kubenswrapper[4940]: I1126 07:39:03.121870 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"3ffbd648618684ccf51d95b8ded1952f830dd60f9a0d973f6641f661defd2586"} Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.432535 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rp4hr"] Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433557 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerName="extract-content" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433575 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerName="extract-content" Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433596 4940 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerName="extract-content" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433604 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerName="extract-content" Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433615 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerName="extract-content" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433624 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerName="extract-content" Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433636 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerName="registry-server" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433644 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerName="registry-server" Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433656 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerName="registry-server" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433664 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerName="registry-server" Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433679 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerName="extract-utilities" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433688 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerName="extract-utilities" Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433696 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerName="extract-utilities" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433705 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerName="extract-utilities" Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433736 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerName="extract-utilities" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433747 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerName="extract-utilities" Nov 26 07:40:01 crc kubenswrapper[4940]: E1126 07:40:01.433768 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerName="registry-server" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433778 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerName="registry-server" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433949 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9615db6-c519-4f33-80fc-5e2635e09c19" containerName="registry-server" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.433976 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e4f10cf-fd84-426f-95c0-1a06fd955651" containerName="registry-server" Nov 26 07:40:01 crc 
kubenswrapper[4940]: I1126 07:40:01.433988 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c1c05bf-442a-42b8-a90e-0df1b09386a9" containerName="registry-server" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.435391 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.441523 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rp4hr"] Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.586269 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-catalog-content\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.586335 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v27c\" (UniqueName: \"kubernetes.io/projected/da793304-6231-44c7-9be0-373ceb9210df-kube-api-access-8v27c\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.586373 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-utilities\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.687500 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-catalog-content\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.687579 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v27c\" (UniqueName: \"kubernetes.io/projected/da793304-6231-44c7-9be0-373ceb9210df-kube-api-access-8v27c\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.687636 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-utilities\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.688133 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-utilities\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.688163 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-catalog-content\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.706029 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v27c\" (UniqueName: \"kubernetes.io/projected/da793304-6231-44c7-9be0-373ceb9210df-kube-api-access-8v27c\") pod \"redhat-marketplace-rp4hr\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:01 crc kubenswrapper[4940]: I1126 07:40:01.798957 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:02 crc kubenswrapper[4940]: I1126 07:40:02.230935 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rp4hr"] Nov 26 07:40:02 crc kubenswrapper[4940]: W1126 07:40:02.235423 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda793304_6231_44c7_9be0_373ceb9210df.slice/crio-d8b6257fd410e5426b2de2900533b96d8752e86358b2b3dc05318dc4c22e23b6 WatchSource:0}: Error finding container d8b6257fd410e5426b2de2900533b96d8752e86358b2b3dc05318dc4c22e23b6: Status 404 returned error can't find the container with id d8b6257fd410e5426b2de2900533b96d8752e86358b2b3dc05318dc4c22e23b6 Nov 26 07:40:02 crc kubenswrapper[4940]: I1126 07:40:02.626885 4940 generic.go:334] "Generic (PLEG): container finished" podID="da793304-6231-44c7-9be0-373ceb9210df" containerID="bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127" exitCode=0 Nov 26 07:40:02 crc kubenswrapper[4940]: I1126 07:40:02.627069 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rp4hr" event={"ID":"da793304-6231-44c7-9be0-373ceb9210df","Type":"ContainerDied","Data":"bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127"} Nov 26 07:40:02 crc kubenswrapper[4940]: I1126 07:40:02.627386 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rp4hr" event={"ID":"da793304-6231-44c7-9be0-373ceb9210df","Type":"ContainerStarted","Data":"d8b6257fd410e5426b2de2900533b96d8752e86358b2b3dc05318dc4c22e23b6"} Nov 26 07:40:02 crc kubenswrapper[4940]: I1126 07:40:02.628726 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:40:04 crc kubenswrapper[4940]: I1126 07:40:04.646237 4940 generic.go:334] "Generic (PLEG): container finished" podID="da793304-6231-44c7-9be0-373ceb9210df" containerID="fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242" exitCode=0 Nov 26 07:40:04 crc kubenswrapper[4940]: I1126 07:40:04.646292 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rp4hr" event={"ID":"da793304-6231-44c7-9be0-373ceb9210df","Type":"ContainerDied","Data":"fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242"} Nov 26 07:40:05 crc kubenswrapper[4940]: I1126 07:40:05.654403 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rp4hr" event={"ID":"da793304-6231-44c7-9be0-373ceb9210df","Type":"ContainerStarted","Data":"511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5"} Nov 26 07:40:05 crc kubenswrapper[4940]: I1126 07:40:05.676026 4940 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rp4hr" podStartSLOduration=2.257572643 podStartE2EDuration="4.676008671s" podCreationTimestamp="2025-11-26 07:40:01 +0000 UTC" firstStartedPulling="2025-11-26 07:40:02.628477133 +0000 UTC m=+2704.148618762" lastFinishedPulling="2025-11-26 07:40:05.046913171 +0000 UTC m=+2706.567054790" observedRunningTime="2025-11-26 07:40:05.669895909 +0000 UTC m=+2707.190037538" watchObservedRunningTime="2025-11-26 07:40:05.676008671 +0000 UTC m=+2707.196150290" Nov 26 07:40:11 crc kubenswrapper[4940]: I1126 07:40:11.799508 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:11 crc kubenswrapper[4940]: I1126 07:40:11.800382 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:11 crc kubenswrapper[4940]: I1126 07:40:11.884576 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:12 crc kubenswrapper[4940]: I1126 07:40:12.793520 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:12 crc kubenswrapper[4940]: I1126 07:40:12.875659 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rp4hr"] Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.551236 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-69rnq"] Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.554173 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.563508 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-69rnq"] Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.591615 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-catalog-content\") pod \"redhat-operators-69rnq\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.591665 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-utilities\") pod \"redhat-operators-69rnq\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.591724 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw8jv\" (UniqueName: \"kubernetes.io/projected/7890daba-bc55-4139-9770-aa21fba13aeb-kube-api-access-tw8jv\") pod \"redhat-operators-69rnq\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.692988 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-catalog-content\") pod \"redhat-operators-69rnq\" (UID: 
\"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.693072 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-utilities\") pod \"redhat-operators-69rnq\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.693144 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw8jv\" (UniqueName: \"kubernetes.io/projected/7890daba-bc55-4139-9770-aa21fba13aeb-kube-api-access-tw8jv\") pod \"redhat-operators-69rnq\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.693513 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-catalog-content\") pod \"redhat-operators-69rnq\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.693573 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-utilities\") pod \"redhat-operators-69rnq\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.716711 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw8jv\" (UniqueName: \"kubernetes.io/projected/7890daba-bc55-4139-9770-aa21fba13aeb-kube-api-access-tw8jv\") pod \"redhat-operators-69rnq\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.732172 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rp4hr" podUID="da793304-6231-44c7-9be0-373ceb9210df" containerName="registry-server" containerID="cri-o://511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5" gracePeriod=2 Nov 26 07:40:14 crc kubenswrapper[4940]: I1126 07:40:14.885386 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.182841 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.303103 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-catalog-content\") pod \"da793304-6231-44c7-9be0-373ceb9210df\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.303218 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v27c\" (UniqueName: \"kubernetes.io/projected/da793304-6231-44c7-9be0-373ceb9210df-kube-api-access-8v27c\") pod \"da793304-6231-44c7-9be0-373ceb9210df\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.303256 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-utilities\") pod \"da793304-6231-44c7-9be0-373ceb9210df\" (UID: \"da793304-6231-44c7-9be0-373ceb9210df\") " Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.303996 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-utilities" (OuterVolumeSpecName: "utilities") pod "da793304-6231-44c7-9be0-373ceb9210df" (UID: "da793304-6231-44c7-9be0-373ceb9210df"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.310546 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da793304-6231-44c7-9be0-373ceb9210df-kube-api-access-8v27c" (OuterVolumeSpecName: "kube-api-access-8v27c") pod "da793304-6231-44c7-9be0-373ceb9210df" (UID: "da793304-6231-44c7-9be0-373ceb9210df"). InnerVolumeSpecName "kube-api-access-8v27c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.326454 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "da793304-6231-44c7-9be0-373ceb9210df" (UID: "da793304-6231-44c7-9be0-373ceb9210df"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.354319 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-69rnq"] Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.405473 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v27c\" (UniqueName: \"kubernetes.io/projected/da793304-6231-44c7-9be0-373ceb9210df-kube-api-access-8v27c\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.405513 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.405525 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da793304-6231-44c7-9be0-373ceb9210df-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.753504 4940 generic.go:334] "Generic (PLEG): container finished" podID="da793304-6231-44c7-9be0-373ceb9210df" containerID="511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5" exitCode=0 Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.753677 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rp4hr" event={"ID":"da793304-6231-44c7-9be0-373ceb9210df","Type":"ContainerDied","Data":"511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5"} Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.753762 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rp4hr" event={"ID":"da793304-6231-44c7-9be0-373ceb9210df","Type":"ContainerDied","Data":"d8b6257fd410e5426b2de2900533b96d8752e86358b2b3dc05318dc4c22e23b6"} Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.753859 4940 scope.go:117] "RemoveContainer" containerID="511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.753721 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rp4hr" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.758246 4940 generic.go:334] "Generic (PLEG): container finished" podID="7890daba-bc55-4139-9770-aa21fba13aeb" containerID="fc99d34d86f27be563ac2945555d435a9ca6a4d206b0336295456540b41cb7dc" exitCode=0 Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.758318 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69rnq" event={"ID":"7890daba-bc55-4139-9770-aa21fba13aeb","Type":"ContainerDied","Data":"fc99d34d86f27be563ac2945555d435a9ca6a4d206b0336295456540b41cb7dc"} Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.758482 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69rnq" event={"ID":"7890daba-bc55-4139-9770-aa21fba13aeb","Type":"ContainerStarted","Data":"0d2f5b409a58161ec924d993f2df242a3683ee206873090986d4f6fbcfbc93d4"} Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.778111 4940 scope.go:117] "RemoveContainer" containerID="fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.800724 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rp4hr"] Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.807637 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rp4hr"] Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.817097 4940 scope.go:117] "RemoveContainer" containerID="bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.844213 4940 scope.go:117] "RemoveContainer" containerID="511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5" Nov 26 07:40:15 crc kubenswrapper[4940]: E1126 07:40:15.844793 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5\": container with ID starting with 511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5 not found: ID does not exist" containerID="511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.844850 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5"} err="failed to get container status \"511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5\": rpc error: code = NotFound desc = could not find container \"511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5\": container with ID starting with 511727d37d1cf767300daa577ac573f98beadfbd45f5eee9fc3991d88f8d73f5 not found: ID does not exist" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.844877 4940 scope.go:117] "RemoveContainer" containerID="fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242" Nov 26 07:40:15 crc kubenswrapper[4940]: E1126 07:40:15.845445 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242\": container with ID starting with fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242 not found: ID does not exist" 
containerID="fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.845478 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242"} err="failed to get container status \"fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242\": rpc error: code = NotFound desc = could not find container \"fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242\": container with ID starting with fa5abb709139fc4e3702659d9a08a6c94b32397b67cb20ca7ac63f5092d8c242 not found: ID does not exist" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.845504 4940 scope.go:117] "RemoveContainer" containerID="bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127" Nov 26 07:40:15 crc kubenswrapper[4940]: E1126 07:40:15.847810 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127\": container with ID starting with bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127 not found: ID does not exist" containerID="bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127" Nov 26 07:40:15 crc kubenswrapper[4940]: I1126 07:40:15.847834 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127"} err="failed to get container status \"bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127\": rpc error: code = NotFound desc = could not find container \"bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127\": container with ID starting with bb1ff1cb48fa2a292c289117dceab87ad597a6fe2c0f3dd06ac6359ed7663127 not found: ID does not exist" Nov 26 07:40:16 crc kubenswrapper[4940]: I1126 07:40:16.770877 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69rnq" event={"ID":"7890daba-bc55-4139-9770-aa21fba13aeb","Type":"ContainerStarted","Data":"f5f19ec5a945cf314e92fd38079b42cc9f60d19e4e82e3a96608f1ac87016676"} Nov 26 07:40:17 crc kubenswrapper[4940]: I1126 07:40:17.180168 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da793304-6231-44c7-9be0-373ceb9210df" path="/var/lib/kubelet/pods/da793304-6231-44c7-9be0-373ceb9210df/volumes" Nov 26 07:40:17 crc kubenswrapper[4940]: I1126 07:40:17.792267 4940 generic.go:334] "Generic (PLEG): container finished" podID="7890daba-bc55-4139-9770-aa21fba13aeb" containerID="f5f19ec5a945cf314e92fd38079b42cc9f60d19e4e82e3a96608f1ac87016676" exitCode=0 Nov 26 07:40:17 crc kubenswrapper[4940]: I1126 07:40:17.792376 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69rnq" event={"ID":"7890daba-bc55-4139-9770-aa21fba13aeb","Type":"ContainerDied","Data":"f5f19ec5a945cf314e92fd38079b42cc9f60d19e4e82e3a96608f1ac87016676"} Nov 26 07:40:18 crc kubenswrapper[4940]: I1126 07:40:18.803832 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69rnq" event={"ID":"7890daba-bc55-4139-9770-aa21fba13aeb","Type":"ContainerStarted","Data":"0107a708457b1f01eda590d57627746125c134dacd404283321239c50197cbb7"} Nov 26 07:40:18 crc kubenswrapper[4940]: I1126 07:40:18.830446 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-69rnq" podStartSLOduration=2.363567212 podStartE2EDuration="4.830427871s" podCreationTimestamp="2025-11-26 07:40:14 +0000 UTC" firstStartedPulling="2025-11-26 07:40:15.760298017 +0000 UTC m=+2717.280439646" lastFinishedPulling="2025-11-26 07:40:18.227158686 +0000 UTC m=+2719.747300305" observedRunningTime="2025-11-26 07:40:18.823199132 +0000 UTC m=+2720.343340761" watchObservedRunningTime="2025-11-26 07:40:18.830427871 +0000 UTC m=+2720.350569490" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.548625 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wlklg"] Nov 26 07:40:19 crc kubenswrapper[4940]: E1126 07:40:19.549633 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da793304-6231-44c7-9be0-373ceb9210df" containerName="extract-utilities" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.549674 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="da793304-6231-44c7-9be0-373ceb9210df" containerName="extract-utilities" Nov 26 07:40:19 crc kubenswrapper[4940]: E1126 07:40:19.549717 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da793304-6231-44c7-9be0-373ceb9210df" containerName="extract-content" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.549737 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="da793304-6231-44c7-9be0-373ceb9210df" containerName="extract-content" Nov 26 07:40:19 crc kubenswrapper[4940]: E1126 07:40:19.549780 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da793304-6231-44c7-9be0-373ceb9210df" containerName="registry-server" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.549798 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="da793304-6231-44c7-9be0-373ceb9210df" containerName="registry-server" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.550323 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="da793304-6231-44c7-9be0-373ceb9210df" containerName="registry-server" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.552971 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.571305 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wlklg"] Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.668467 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-utilities\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.668775 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6gpc\" (UniqueName: \"kubernetes.io/projected/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-kube-api-access-d6gpc\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.668947 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-catalog-content\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.769805 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-utilities\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.769847 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6gpc\" (UniqueName: \"kubernetes.io/projected/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-kube-api-access-d6gpc\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.769882 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-catalog-content\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.771128 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-catalog-content\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.773019 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-utilities\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.802583 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d6gpc\" (UniqueName: \"kubernetes.io/projected/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-kube-api-access-d6gpc\") pod \"certified-operators-wlklg\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:19 crc kubenswrapper[4940]: I1126 07:40:19.873376 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:20 crc kubenswrapper[4940]: I1126 07:40:20.337222 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wlklg"] Nov 26 07:40:20 crc kubenswrapper[4940]: W1126 07:40:20.341614 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fbe6636_57ac_4aa1_a885_9c56b9bdc174.slice/crio-c2aaf40f8906901c94ede37eaec81e8fc83c59be9b4989667ebf02d9a1489341 WatchSource:0}: Error finding container c2aaf40f8906901c94ede37eaec81e8fc83c59be9b4989667ebf02d9a1489341: Status 404 returned error can't find the container with id c2aaf40f8906901c94ede37eaec81e8fc83c59be9b4989667ebf02d9a1489341 Nov 26 07:40:20 crc kubenswrapper[4940]: I1126 07:40:20.823775 4940 generic.go:334] "Generic (PLEG): container finished" podID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerID="820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60" exitCode=0 Nov 26 07:40:20 crc kubenswrapper[4940]: I1126 07:40:20.823904 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlklg" event={"ID":"6fbe6636-57ac-4aa1-a885-9c56b9bdc174","Type":"ContainerDied","Data":"820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60"} Nov 26 07:40:20 crc kubenswrapper[4940]: I1126 07:40:20.824156 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlklg" event={"ID":"6fbe6636-57ac-4aa1-a885-9c56b9bdc174","Type":"ContainerStarted","Data":"c2aaf40f8906901c94ede37eaec81e8fc83c59be9b4989667ebf02d9a1489341"} Nov 26 07:40:21 crc kubenswrapper[4940]: I1126 07:40:21.833153 4940 generic.go:334] "Generic (PLEG): container finished" podID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerID="770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a" exitCode=0 Nov 26 07:40:21 crc kubenswrapper[4940]: I1126 07:40:21.833193 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlklg" event={"ID":"6fbe6636-57ac-4aa1-a885-9c56b9bdc174","Type":"ContainerDied","Data":"770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a"} Nov 26 07:40:22 crc kubenswrapper[4940]: I1126 07:40:22.842988 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlklg" event={"ID":"6fbe6636-57ac-4aa1-a885-9c56b9bdc174","Type":"ContainerStarted","Data":"85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df"} Nov 26 07:40:22 crc kubenswrapper[4940]: I1126 07:40:22.863138 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wlklg" podStartSLOduration=2.393119978 podStartE2EDuration="3.863118293s" podCreationTimestamp="2025-11-26 07:40:19 +0000 UTC" firstStartedPulling="2025-11-26 07:40:20.826230583 +0000 UTC m=+2722.346372212" lastFinishedPulling="2025-11-26 07:40:22.296228908 +0000 UTC m=+2723.816370527" observedRunningTime="2025-11-26 07:40:22.861361228 +0000 UTC 
m=+2724.381502857" watchObservedRunningTime="2025-11-26 07:40:22.863118293 +0000 UTC m=+2724.383259912" Nov 26 07:40:24 crc kubenswrapper[4940]: I1126 07:40:24.885536 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:24 crc kubenswrapper[4940]: I1126 07:40:24.885637 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:24 crc kubenswrapper[4940]: I1126 07:40:24.955360 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:25 crc kubenswrapper[4940]: I1126 07:40:25.918964 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:26 crc kubenswrapper[4940]: I1126 07:40:26.930339 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-69rnq"] Nov 26 07:40:27 crc kubenswrapper[4940]: I1126 07:40:27.882729 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-69rnq" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" containerName="registry-server" containerID="cri-o://0107a708457b1f01eda590d57627746125c134dacd404283321239c50197cbb7" gracePeriod=2 Nov 26 07:40:28 crc kubenswrapper[4940]: I1126 07:40:28.897558 4940 generic.go:334] "Generic (PLEG): container finished" podID="7890daba-bc55-4139-9770-aa21fba13aeb" containerID="0107a708457b1f01eda590d57627746125c134dacd404283321239c50197cbb7" exitCode=0 Nov 26 07:40:28 crc kubenswrapper[4940]: I1126 07:40:28.897624 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69rnq" event={"ID":"7890daba-bc55-4139-9770-aa21fba13aeb","Type":"ContainerDied","Data":"0107a708457b1f01eda590d57627746125c134dacd404283321239c50197cbb7"} Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.509568 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.539749 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-catalog-content\") pod \"7890daba-bc55-4139-9770-aa21fba13aeb\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.539871 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-utilities\") pod \"7890daba-bc55-4139-9770-aa21fba13aeb\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.539910 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tw8jv\" (UniqueName: \"kubernetes.io/projected/7890daba-bc55-4139-9770-aa21fba13aeb-kube-api-access-tw8jv\") pod \"7890daba-bc55-4139-9770-aa21fba13aeb\" (UID: \"7890daba-bc55-4139-9770-aa21fba13aeb\") " Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.544736 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-utilities" (OuterVolumeSpecName: "utilities") pod "7890daba-bc55-4139-9770-aa21fba13aeb" (UID: "7890daba-bc55-4139-9770-aa21fba13aeb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.554306 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7890daba-bc55-4139-9770-aa21fba13aeb-kube-api-access-tw8jv" (OuterVolumeSpecName: "kube-api-access-tw8jv") pod "7890daba-bc55-4139-9770-aa21fba13aeb" (UID: "7890daba-bc55-4139-9770-aa21fba13aeb"). InnerVolumeSpecName "kube-api-access-tw8jv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.640603 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.640633 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tw8jv\" (UniqueName: \"kubernetes.io/projected/7890daba-bc55-4139-9770-aa21fba13aeb-kube-api-access-tw8jv\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.659021 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7890daba-bc55-4139-9770-aa21fba13aeb" (UID: "7890daba-bc55-4139-9770-aa21fba13aeb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.742214 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7890daba-bc55-4139-9770-aa21fba13aeb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.874399 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.874543 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.911289 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69rnq" event={"ID":"7890daba-bc55-4139-9770-aa21fba13aeb","Type":"ContainerDied","Data":"0d2f5b409a58161ec924d993f2df242a3683ee206873090986d4f6fbcfbc93d4"} Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.911334 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-69rnq" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.911368 4940 scope.go:117] "RemoveContainer" containerID="0107a708457b1f01eda590d57627746125c134dacd404283321239c50197cbb7" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.939480 4940 scope.go:117] "RemoveContainer" containerID="f5f19ec5a945cf314e92fd38079b42cc9f60d19e4e82e3a96608f1ac87016676" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.959518 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.979928 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-69rnq"] Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.987986 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-69rnq"] Nov 26 07:40:29 crc kubenswrapper[4940]: I1126 07:40:29.993968 4940 scope.go:117] "RemoveContainer" containerID="fc99d34d86f27be563ac2945555d435a9ca6a4d206b0336295456540b41cb7dc" Nov 26 07:40:30 crc kubenswrapper[4940]: E1126 07:40:30.125743 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7890daba_bc55_4139_9770_aa21fba13aeb.slice/crio-0d2f5b409a58161ec924d993f2df242a3683ee206873090986d4f6fbcfbc93d4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7890daba_bc55_4139_9770_aa21fba13aeb.slice\": RecentStats: unable to find data in memory cache]" Nov 26 07:40:30 crc kubenswrapper[4940]: I1126 07:40:30.986384 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:31 crc kubenswrapper[4940]: I1126 07:40:31.182368 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" path="/var/lib/kubelet/pods/7890daba-bc55-4139-9770-aa21fba13aeb/volumes" Nov 26 07:40:32 crc kubenswrapper[4940]: I1126 07:40:32.331649 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wlklg"] Nov 26 07:40:32 crc kubenswrapper[4940]: I1126 07:40:32.938802 4940 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wlklg" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerName="registry-server" containerID="cri-o://85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df" gracePeriod=2 Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.403232 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.499867 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-catalog-content\") pod \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.499943 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6gpc\" (UniqueName: \"kubernetes.io/projected/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-kube-api-access-d6gpc\") pod \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.499965 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-utilities\") pod \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\" (UID: \"6fbe6636-57ac-4aa1-a885-9c56b9bdc174\") " Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.500969 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-utilities" (OuterVolumeSpecName: "utilities") pod "6fbe6636-57ac-4aa1-a885-9c56b9bdc174" (UID: "6fbe6636-57ac-4aa1-a885-9c56b9bdc174"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.508774 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-kube-api-access-d6gpc" (OuterVolumeSpecName: "kube-api-access-d6gpc") pod "6fbe6636-57ac-4aa1-a885-9c56b9bdc174" (UID: "6fbe6636-57ac-4aa1-a885-9c56b9bdc174"). InnerVolumeSpecName "kube-api-access-d6gpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.574827 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6fbe6636-57ac-4aa1-a885-9c56b9bdc174" (UID: "6fbe6636-57ac-4aa1-a885-9c56b9bdc174"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.601272 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6gpc\" (UniqueName: \"kubernetes.io/projected/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-kube-api-access-d6gpc\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.601302 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.601314 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fbe6636-57ac-4aa1-a885-9c56b9bdc174-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.952168 4940 generic.go:334] "Generic (PLEG): container finished" podID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerID="85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df" exitCode=0 Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.952226 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlklg" event={"ID":"6fbe6636-57ac-4aa1-a885-9c56b9bdc174","Type":"ContainerDied","Data":"85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df"} Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.952265 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlklg" event={"ID":"6fbe6636-57ac-4aa1-a885-9c56b9bdc174","Type":"ContainerDied","Data":"c2aaf40f8906901c94ede37eaec81e8fc83c59be9b4989667ebf02d9a1489341"} Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.952293 4940 scope.go:117] "RemoveContainer" containerID="85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.952469 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wlklg" Nov 26 07:40:33 crc kubenswrapper[4940]: I1126 07:40:33.987806 4940 scope.go:117] "RemoveContainer" containerID="770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a" Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.000639 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wlklg"] Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.009510 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wlklg"] Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.019485 4940 scope.go:117] "RemoveContainer" containerID="820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60" Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.059224 4940 scope.go:117] "RemoveContainer" containerID="85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df" Nov 26 07:40:34 crc kubenswrapper[4940]: E1126 07:40:34.059746 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df\": container with ID starting with 85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df not found: ID does not exist" containerID="85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df" Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.059808 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df"} err="failed to get container status \"85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df\": rpc error: code = NotFound desc = could not find container \"85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df\": container with ID starting with 85131a1746397c18d7707273d90f6ec24199829dc05f67dfddf75903b0aff4df not found: ID does not exist" Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.059845 4940 scope.go:117] "RemoveContainer" containerID="770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a" Nov 26 07:40:34 crc kubenswrapper[4940]: E1126 07:40:34.060550 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a\": container with ID starting with 770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a not found: ID does not exist" containerID="770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a" Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.060615 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a"} err="failed to get container status \"770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a\": rpc error: code = NotFound desc = could not find container \"770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a\": container with ID starting with 770cee99010f998db066f081819789170401a4d997c9573dd2eb5ce73e49dd9a not found: ID does not exist" Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.060661 4940 scope.go:117] "RemoveContainer" containerID="820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60" Nov 26 07:40:34 crc kubenswrapper[4940]: E1126 07:40:34.061227 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60\": container with ID starting with 820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60 not found: ID does not exist" containerID="820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60" Nov 26 07:40:34 crc kubenswrapper[4940]: I1126 07:40:34.061278 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60"} err="failed to get container status \"820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60\": rpc error: code = NotFound desc = could not find container \"820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60\": container with ID starting with 820ec8e1a09743450684797fa0b2c9ea8405e877bb7adc7423e7c558c6d3aa60 not found: ID does not exist" Nov 26 07:40:35 crc kubenswrapper[4940]: I1126 07:40:35.175931 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" path="/var/lib/kubelet/pods/6fbe6636-57ac-4aa1-a885-9c56b9bdc174/volumes" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.365663 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jkwf9"] Nov 26 07:41:08 crc kubenswrapper[4940]: E1126 07:41:08.366538 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" containerName="registry-server" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.366554 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" containerName="registry-server" Nov 26 07:41:08 crc kubenswrapper[4940]: E1126 07:41:08.366573 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" containerName="extract-content" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.366583 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" containerName="extract-content" Nov 26 07:41:08 crc kubenswrapper[4940]: E1126 07:41:08.366611 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerName="extract-content" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.366619 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerName="extract-content" Nov 26 07:41:08 crc kubenswrapper[4940]: E1126 07:41:08.366640 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" containerName="extract-utilities" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.366649 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" containerName="extract-utilities" Nov 26 07:41:08 crc kubenswrapper[4940]: E1126 07:41:08.366670 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerName="registry-server" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.366678 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerName="registry-server" Nov 26 07:41:08 crc kubenswrapper[4940]: E1126 07:41:08.366700 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" 
containerName="extract-utilities" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.366708 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerName="extract-utilities" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.366879 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7890daba-bc55-4139-9770-aa21fba13aeb" containerName="registry-server" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.366901 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fbe6636-57ac-4aa1-a885-9c56b9bdc174" containerName="registry-server" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.368175 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.379885 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jkwf9"] Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.421783 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-utilities\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.422302 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-catalog-content\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.422336 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcjpk\" (UniqueName: \"kubernetes.io/projected/ad474c0f-6749-4ff6-bd66-a0762dab2e46-kube-api-access-xcjpk\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.523829 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-utilities\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.523980 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-catalog-content\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.524020 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcjpk\" (UniqueName: \"kubernetes.io/projected/ad474c0f-6749-4ff6-bd66-a0762dab2e46-kube-api-access-xcjpk\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.524573 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-utilities\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.524652 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-catalog-content\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.554404 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcjpk\" (UniqueName: \"kubernetes.io/projected/ad474c0f-6749-4ff6-bd66-a0762dab2e46-kube-api-access-xcjpk\") pod \"community-operators-jkwf9\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:08 crc kubenswrapper[4940]: I1126 07:41:08.704928 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:09 crc kubenswrapper[4940]: I1126 07:41:09.195320 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jkwf9"] Nov 26 07:41:09 crc kubenswrapper[4940]: I1126 07:41:09.292004 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkwf9" event={"ID":"ad474c0f-6749-4ff6-bd66-a0762dab2e46","Type":"ContainerStarted","Data":"54c6f60a8f2b67dbbf2bdc6181cd081b8c912c705bec93a6a3a3f5893d1c1acd"} Nov 26 07:41:10 crc kubenswrapper[4940]: I1126 07:41:10.303727 4940 generic.go:334] "Generic (PLEG): container finished" podID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerID="a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609" exitCode=0 Nov 26 07:41:10 crc kubenswrapper[4940]: I1126 07:41:10.304202 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkwf9" event={"ID":"ad474c0f-6749-4ff6-bd66-a0762dab2e46","Type":"ContainerDied","Data":"a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609"} Nov 26 07:41:11 crc kubenswrapper[4940]: I1126 07:41:11.319794 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkwf9" event={"ID":"ad474c0f-6749-4ff6-bd66-a0762dab2e46","Type":"ContainerStarted","Data":"65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0"} Nov 26 07:41:12 crc kubenswrapper[4940]: I1126 07:41:12.339378 4940 generic.go:334] "Generic (PLEG): container finished" podID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerID="65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0" exitCode=0 Nov 26 07:41:12 crc kubenswrapper[4940]: I1126 07:41:12.341527 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkwf9" event={"ID":"ad474c0f-6749-4ff6-bd66-a0762dab2e46","Type":"ContainerDied","Data":"65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0"} Nov 26 07:41:13 crc kubenswrapper[4940]: I1126 07:41:13.352769 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkwf9" event={"ID":"ad474c0f-6749-4ff6-bd66-a0762dab2e46","Type":"ContainerStarted","Data":"815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70"} Nov 26 
07:41:13 crc kubenswrapper[4940]: I1126 07:41:13.385684 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jkwf9" podStartSLOduration=2.874896348 podStartE2EDuration="5.385658444s" podCreationTimestamp="2025-11-26 07:41:08 +0000 UTC" firstStartedPulling="2025-11-26 07:41:10.30824383 +0000 UTC m=+2771.828385489" lastFinishedPulling="2025-11-26 07:41:12.819005956 +0000 UTC m=+2774.339147585" observedRunningTime="2025-11-26 07:41:13.375769381 +0000 UTC m=+2774.895911010" watchObservedRunningTime="2025-11-26 07:41:13.385658444 +0000 UTC m=+2774.905800093" Nov 26 07:41:18 crc kubenswrapper[4940]: I1126 07:41:18.705551 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:18 crc kubenswrapper[4940]: I1126 07:41:18.705883 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:18 crc kubenswrapper[4940]: I1126 07:41:18.781005 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:19 crc kubenswrapper[4940]: I1126 07:41:19.488686 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:19 crc kubenswrapper[4940]: I1126 07:41:19.557045 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jkwf9"] Nov 26 07:41:21 crc kubenswrapper[4940]: I1126 07:41:21.427008 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jkwf9" podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerName="registry-server" containerID="cri-o://815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70" gracePeriod=2 Nov 26 07:41:21 crc kubenswrapper[4940]: I1126 07:41:21.734460 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:41:21 crc kubenswrapper[4940]: I1126 07:41:21.734516 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.000147 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.165564 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcjpk\" (UniqueName: \"kubernetes.io/projected/ad474c0f-6749-4ff6-bd66-a0762dab2e46-kube-api-access-xcjpk\") pod \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.165684 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-utilities\") pod \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.165793 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-catalog-content\") pod \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\" (UID: \"ad474c0f-6749-4ff6-bd66-a0762dab2e46\") " Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.166847 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-utilities" (OuterVolumeSpecName: "utilities") pod "ad474c0f-6749-4ff6-bd66-a0762dab2e46" (UID: "ad474c0f-6749-4ff6-bd66-a0762dab2e46"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.173305 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad474c0f-6749-4ff6-bd66-a0762dab2e46-kube-api-access-xcjpk" (OuterVolumeSpecName: "kube-api-access-xcjpk") pod "ad474c0f-6749-4ff6-bd66-a0762dab2e46" (UID: "ad474c0f-6749-4ff6-bd66-a0762dab2e46"). InnerVolumeSpecName "kube-api-access-xcjpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.263798 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad474c0f-6749-4ff6-bd66-a0762dab2e46" (UID: "ad474c0f-6749-4ff6-bd66-a0762dab2e46"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.267517 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcjpk\" (UniqueName: \"kubernetes.io/projected/ad474c0f-6749-4ff6-bd66-a0762dab2e46-kube-api-access-xcjpk\") on node \"crc\" DevicePath \"\"" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.267559 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.267568 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad474c0f-6749-4ff6-bd66-a0762dab2e46-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.440558 4940 generic.go:334] "Generic (PLEG): container finished" podID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerID="815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70" exitCode=0 Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.440619 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkwf9" event={"ID":"ad474c0f-6749-4ff6-bd66-a0762dab2e46","Type":"ContainerDied","Data":"815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70"} Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.440667 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkwf9" event={"ID":"ad474c0f-6749-4ff6-bd66-a0762dab2e46","Type":"ContainerDied","Data":"54c6f60a8f2b67dbbf2bdc6181cd081b8c912c705bec93a6a3a3f5893d1c1acd"} Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.440683 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jkwf9" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.440696 4940 scope.go:117] "RemoveContainer" containerID="815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.476428 4940 scope.go:117] "RemoveContainer" containerID="65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.481692 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jkwf9"] Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.490461 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jkwf9"] Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.506709 4940 scope.go:117] "RemoveContainer" containerID="a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.549834 4940 scope.go:117] "RemoveContainer" containerID="815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70" Nov 26 07:41:22 crc kubenswrapper[4940]: E1126 07:41:22.550474 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70\": container with ID starting with 815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70 not found: ID does not exist" containerID="815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.550528 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70"} err="failed to get container status \"815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70\": rpc error: code = NotFound desc = could not find container \"815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70\": container with ID starting with 815ab1856c9cadfc11dfc6e552e1adaed784d554f0e7239d5afaf493c2d94d70 not found: ID does not exist" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.550564 4940 scope.go:117] "RemoveContainer" containerID="65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0" Nov 26 07:41:22 crc kubenswrapper[4940]: E1126 07:41:22.551013 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0\": container with ID starting with 65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0 not found: ID does not exist" containerID="65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.551044 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0"} err="failed to get container status \"65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0\": rpc error: code = NotFound desc = could not find container \"65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0\": container with ID starting with 65f4e6344db093a767cf9cf731a9d9efa1636d53d38956f81bfb28f80581c5c0 not found: ID does not exist" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.551083 4940 scope.go:117] "RemoveContainer" 
containerID="a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609" Nov 26 07:41:22 crc kubenswrapper[4940]: E1126 07:41:22.551453 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609\": container with ID starting with a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609 not found: ID does not exist" containerID="a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609" Nov 26 07:41:22 crc kubenswrapper[4940]: I1126 07:41:22.551481 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609"} err="failed to get container status \"a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609\": rpc error: code = NotFound desc = could not find container \"a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609\": container with ID starting with a0222bd58fd3d3557f804c3ba536b4001cdd58dae469c119c36313f4adfff609 not found: ID does not exist" Nov 26 07:41:23 crc kubenswrapper[4940]: I1126 07:41:23.183178 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" path="/var/lib/kubelet/pods/ad474c0f-6749-4ff6-bd66-a0762dab2e46/volumes" Nov 26 07:41:51 crc kubenswrapper[4940]: I1126 07:41:51.728852 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:41:51 crc kubenswrapper[4940]: I1126 07:41:51.729804 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:42:21 crc kubenswrapper[4940]: I1126 07:42:21.727875 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:42:21 crc kubenswrapper[4940]: I1126 07:42:21.728446 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:42:21 crc kubenswrapper[4940]: I1126 07:42:21.728488 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:42:21 crc kubenswrapper[4940]: I1126 07:42:21.729031 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3ffbd648618684ccf51d95b8ded1952f830dd60f9a0d973f6641f661defd2586"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:42:21 crc 
kubenswrapper[4940]: I1126 07:42:21.729121 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://3ffbd648618684ccf51d95b8ded1952f830dd60f9a0d973f6641f661defd2586" gracePeriod=600 Nov 26 07:42:22 crc kubenswrapper[4940]: I1126 07:42:22.010542 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="3ffbd648618684ccf51d95b8ded1952f830dd60f9a0d973f6641f661defd2586" exitCode=0 Nov 26 07:42:22 crc kubenswrapper[4940]: I1126 07:42:22.010582 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"3ffbd648618684ccf51d95b8ded1952f830dd60f9a0d973f6641f661defd2586"} Nov 26 07:42:22 crc kubenswrapper[4940]: I1126 07:42:22.010613 4940 scope.go:117] "RemoveContainer" containerID="a0f31dfca0a7cb7c55d0be1292941fd0f4f4340403ebfd35606499a78810ead0" Nov 26 07:42:23 crc kubenswrapper[4940]: I1126 07:42:23.036646 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356"} Nov 26 07:44:51 crc kubenswrapper[4940]: I1126 07:44:51.729489 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:44:51 crc kubenswrapper[4940]: I1126 07:44:51.730420 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.166019 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f"] Nov 26 07:45:00 crc kubenswrapper[4940]: E1126 07:45:00.167329 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerName="extract-utilities" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.167363 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerName="extract-utilities" Nov 26 07:45:00 crc kubenswrapper[4940]: E1126 07:45:00.167416 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerName="extract-content" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.167434 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerName="extract-content" Nov 26 07:45:00 crc kubenswrapper[4940]: E1126 07:45:00.167467 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerName="registry-server" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.167485 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerName="registry-server" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.167864 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad474c0f-6749-4ff6-bd66-a0762dab2e46" containerName="registry-server" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.168934 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.174970 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.174988 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.178889 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f"] Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.271933 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-secret-volume\") pod \"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.272195 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrkh9\" (UniqueName: \"kubernetes.io/projected/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-kube-api-access-qrkh9\") pod \"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.272232 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-config-volume\") pod \"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.374120 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-secret-volume\") pod \"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.374251 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrkh9\" (UniqueName: \"kubernetes.io/projected/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-kube-api-access-qrkh9\") pod \"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.374352 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-config-volume\") pod 
\"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.376452 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-config-volume\") pod \"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.380814 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-secret-volume\") pod \"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.391415 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrkh9\" (UniqueName: \"kubernetes.io/projected/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-kube-api-access-qrkh9\") pod \"collect-profiles-29402385-qwx5f\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.491058 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:00 crc kubenswrapper[4940]: I1126 07:45:00.895603 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f"] Nov 26 07:45:00 crc kubenswrapper[4940]: W1126 07:45:00.902438 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0d6b053_b0ff_42ea_9676_0b59f03b6d9e.slice/crio-721b8275987d1f914d00fd20766dfce15c733557068c18cf9eb34e6c2678d41c WatchSource:0}: Error finding container 721b8275987d1f914d00fd20766dfce15c733557068c18cf9eb34e6c2678d41c: Status 404 returned error can't find the container with id 721b8275987d1f914d00fd20766dfce15c733557068c18cf9eb34e6c2678d41c Nov 26 07:45:01 crc kubenswrapper[4940]: I1126 07:45:01.528970 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" event={"ID":"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e","Type":"ContainerDied","Data":"e95dc71a819327999082589106061f0d70d55e281b4dfb73f15c7786b0cd0314"} Nov 26 07:45:01 crc kubenswrapper[4940]: I1126 07:45:01.528818 4940 generic.go:334] "Generic (PLEG): container finished" podID="b0d6b053-b0ff-42ea-9676-0b59f03b6d9e" containerID="e95dc71a819327999082589106061f0d70d55e281b4dfb73f15c7786b0cd0314" exitCode=0 Nov 26 07:45:01 crc kubenswrapper[4940]: I1126 07:45:01.530277 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" event={"ID":"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e","Type":"ContainerStarted","Data":"721b8275987d1f914d00fd20766dfce15c733557068c18cf9eb34e6c2678d41c"} Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.055578 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.102031 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-secret-volume\") pod \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.102126 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrkh9\" (UniqueName: \"kubernetes.io/projected/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-kube-api-access-qrkh9\") pod \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.102207 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-config-volume\") pod \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\" (UID: \"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e\") " Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.103025 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-config-volume" (OuterVolumeSpecName: "config-volume") pod "b0d6b053-b0ff-42ea-9676-0b59f03b6d9e" (UID: "b0d6b053-b0ff-42ea-9676-0b59f03b6d9e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.107607 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b0d6b053-b0ff-42ea-9676-0b59f03b6d9e" (UID: "b0d6b053-b0ff-42ea-9676-0b59f03b6d9e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.108212 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-kube-api-access-qrkh9" (OuterVolumeSpecName: "kube-api-access-qrkh9") pod "b0d6b053-b0ff-42ea-9676-0b59f03b6d9e" (UID: "b0d6b053-b0ff-42ea-9676-0b59f03b6d9e"). InnerVolumeSpecName "kube-api-access-qrkh9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.203760 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.203789 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.203799 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrkh9\" (UniqueName: \"kubernetes.io/projected/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e-kube-api-access-qrkh9\") on node \"crc\" DevicePath \"\"" Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.806974 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" event={"ID":"b0d6b053-b0ff-42ea-9676-0b59f03b6d9e","Type":"ContainerDied","Data":"721b8275987d1f914d00fd20766dfce15c733557068c18cf9eb34e6c2678d41c"} Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.807087 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="721b8275987d1f914d00fd20766dfce15c733557068c18cf9eb34e6c2678d41c" Nov 26 07:45:03 crc kubenswrapper[4940]: I1126 07:45:03.807193 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f" Nov 26 07:45:04 crc kubenswrapper[4940]: I1126 07:45:04.144942 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"] Nov 26 07:45:04 crc kubenswrapper[4940]: I1126 07:45:04.153303 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-chpz5"] Nov 26 07:45:05 crc kubenswrapper[4940]: I1126 07:45:05.179147 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acefc5b4-b4c5-474f-a038-b04842446dc9" path="/var/lib/kubelet/pods/acefc5b4-b4c5-474f-a038-b04842446dc9/volumes" Nov 26 07:45:08 crc kubenswrapper[4940]: I1126 07:45:08.124558 4940 scope.go:117] "RemoveContainer" containerID="cb01e0c93687a515dea84d0d2a9c6a96f5132f0e7d9f9c8a2a79b2c6a89fd119" Nov 26 07:45:21 crc kubenswrapper[4940]: I1126 07:45:21.728481 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:45:21 crc kubenswrapper[4940]: I1126 07:45:21.729146 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:45:51 crc kubenswrapper[4940]: I1126 07:45:51.729080 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 26 07:45:51 crc kubenswrapper[4940]: I1126 07:45:51.729693 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:45:51 crc kubenswrapper[4940]: I1126 07:45:51.729769 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:45:51 crc kubenswrapper[4940]: I1126 07:45:51.730629 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:45:51 crc kubenswrapper[4940]: I1126 07:45:51.730732 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" gracePeriod=600 Nov 26 07:45:51 crc kubenswrapper[4940]: E1126 07:45:51.917984 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:45:52 crc kubenswrapper[4940]: I1126 07:45:52.252755 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" exitCode=0 Nov 26 07:45:52 crc kubenswrapper[4940]: I1126 07:45:52.252802 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356"} Nov 26 07:45:52 crc kubenswrapper[4940]: I1126 07:45:52.252833 4940 scope.go:117] "RemoveContainer" containerID="3ffbd648618684ccf51d95b8ded1952f830dd60f9a0d973f6641f661defd2586" Nov 26 07:45:52 crc kubenswrapper[4940]: I1126 07:45:52.253313 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:45:52 crc kubenswrapper[4940]: E1126 07:45:52.253900 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:46:06 crc kubenswrapper[4940]: I1126 07:46:06.165743 4940 scope.go:117] "RemoveContainer" 
containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:46:06 crc kubenswrapper[4940]: E1126 07:46:06.166392 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:46:17 crc kubenswrapper[4940]: I1126 07:46:17.165359 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:46:17 crc kubenswrapper[4940]: E1126 07:46:17.166599 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:46:30 crc kubenswrapper[4940]: I1126 07:46:30.166408 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:46:30 crc kubenswrapper[4940]: E1126 07:46:30.167669 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:46:45 crc kubenswrapper[4940]: I1126 07:46:45.186082 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:46:45 crc kubenswrapper[4940]: E1126 07:46:45.186801 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:47:00 crc kubenswrapper[4940]: I1126 07:47:00.165342 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:47:00 crc kubenswrapper[4940]: E1126 07:47:00.167186 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:47:12 crc kubenswrapper[4940]: I1126 07:47:12.166091 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:47:12 crc kubenswrapper[4940]: E1126 07:47:12.166912 4940 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:47:23 crc kubenswrapper[4940]: I1126 07:47:23.166550 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:47:23 crc kubenswrapper[4940]: E1126 07:47:23.167358 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:47:35 crc kubenswrapper[4940]: I1126 07:47:35.165909 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:47:35 crc kubenswrapper[4940]: E1126 07:47:35.166878 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:47:49 crc kubenswrapper[4940]: I1126 07:47:49.170637 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:47:49 crc kubenswrapper[4940]: E1126 07:47:49.171500 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:48:04 crc kubenswrapper[4940]: I1126 07:48:04.165890 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:48:04 crc kubenswrapper[4940]: E1126 07:48:04.166677 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:48:18 crc kubenswrapper[4940]: I1126 07:48:18.165802 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:48:18 crc kubenswrapper[4940]: E1126 07:48:18.168938 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:48:30 crc kubenswrapper[4940]: I1126 07:48:30.165883 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:48:30 crc kubenswrapper[4940]: E1126 07:48:30.166654 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:48:42 crc kubenswrapper[4940]: I1126 07:48:42.165324 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:48:42 crc kubenswrapper[4940]: E1126 07:48:42.165942 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:48:55 crc kubenswrapper[4940]: I1126 07:48:55.166859 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:48:55 crc kubenswrapper[4940]: E1126 07:48:55.167798 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:49:07 crc kubenswrapper[4940]: I1126 07:49:07.165786 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:49:07 crc kubenswrapper[4940]: E1126 07:49:07.166571 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:49:19 crc kubenswrapper[4940]: I1126 07:49:19.170479 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:49:19 crc kubenswrapper[4940]: E1126 07:49:19.171374 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:49:32 crc kubenswrapper[4940]: I1126 07:49:32.165373 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:49:32 crc kubenswrapper[4940]: E1126 07:49:32.166285 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:49:47 crc kubenswrapper[4940]: I1126 07:49:47.165849 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:49:47 crc kubenswrapper[4940]: E1126 07:49:47.166743 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:49:58 crc kubenswrapper[4940]: I1126 07:49:58.166172 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:49:58 crc kubenswrapper[4940]: E1126 07:49:58.167376 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:50:09 crc kubenswrapper[4940]: I1126 07:50:09.171182 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:50:09 crc kubenswrapper[4940]: E1126 07:50:09.172201 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:50:23 crc kubenswrapper[4940]: I1126 07:50:23.165925 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:50:23 crc kubenswrapper[4940]: E1126 07:50:23.167160 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:50:34 crc kubenswrapper[4940]: I1126 07:50:34.165938 4940 scope.go:117] "RemoveContainer" 
containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:50:34 crc kubenswrapper[4940]: E1126 07:50:34.167819 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:50:46 crc kubenswrapper[4940]: I1126 07:50:46.165799 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:50:46 crc kubenswrapper[4940]: E1126 07:50:46.167399 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.278847 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9qtv8"] Nov 26 07:50:57 crc kubenswrapper[4940]: E1126 07:50:57.279561 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0d6b053-b0ff-42ea-9676-0b59f03b6d9e" containerName="collect-profiles" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.279579 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0d6b053-b0ff-42ea-9676-0b59f03b6d9e" containerName="collect-profiles" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.279795 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0d6b053-b0ff-42ea-9676-0b59f03b6d9e" containerName="collect-profiles" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.280883 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.296956 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9qtv8"] Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.418026 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmrq9\" (UniqueName: \"kubernetes.io/projected/473155f1-80ea-479a-b816-4369936a8d8b-kube-api-access-mmrq9\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.418117 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-utilities\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.418170 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-catalog-content\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.519787 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmrq9\" (UniqueName: \"kubernetes.io/projected/473155f1-80ea-479a-b816-4369936a8d8b-kube-api-access-mmrq9\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.519881 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-utilities\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.519975 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-catalog-content\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.520426 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-catalog-content\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.520545 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-utilities\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.545189 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mmrq9\" (UniqueName: \"kubernetes.io/projected/473155f1-80ea-479a-b816-4369936a8d8b-kube-api-access-mmrq9\") pod \"certified-operators-9qtv8\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:57 crc kubenswrapper[4940]: I1126 07:50:57.622375 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:50:58 crc kubenswrapper[4940]: I1126 07:50:58.114501 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9qtv8"] Nov 26 07:50:58 crc kubenswrapper[4940]: I1126 07:50:58.165507 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:50:58 crc kubenswrapper[4940]: I1126 07:50:58.903756 4940 generic.go:334] "Generic (PLEG): container finished" podID="473155f1-80ea-479a-b816-4369936a8d8b" containerID="0a49bd851d5b5d9e63a16f97c26099c482c5a0671c1105c8a85ae74aafa93bd9" exitCode=0 Nov 26 07:50:58 crc kubenswrapper[4940]: I1126 07:50:58.903807 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qtv8" event={"ID":"473155f1-80ea-479a-b816-4369936a8d8b","Type":"ContainerDied","Data":"0a49bd851d5b5d9e63a16f97c26099c482c5a0671c1105c8a85ae74aafa93bd9"} Nov 26 07:50:58 crc kubenswrapper[4940]: I1126 07:50:58.904091 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qtv8" event={"ID":"473155f1-80ea-479a-b816-4369936a8d8b","Type":"ContainerStarted","Data":"c74ff593d3374cf795d1efbfdef9c8bcc2929755afced9c0f3b9fb8a36184864"} Nov 26 07:50:58 crc kubenswrapper[4940]: I1126 07:50:58.906727 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:50:58 crc kubenswrapper[4940]: I1126 07:50:58.907233 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"91d08a9c993615f97c26c986c131062c06d821e8ea5287973d01a577d1d0fabe"} Nov 26 07:50:59 crc kubenswrapper[4940]: I1126 07:50:59.917739 4940 generic.go:334] "Generic (PLEG): container finished" podID="473155f1-80ea-479a-b816-4369936a8d8b" containerID="39fb4d61c3c9a4e5a2923777ab25b155fe60ce56303b04ca02c2a47358b28a4e" exitCode=0 Nov 26 07:50:59 crc kubenswrapper[4940]: I1126 07:50:59.918233 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qtv8" event={"ID":"473155f1-80ea-479a-b816-4369936a8d8b","Type":"ContainerDied","Data":"39fb4d61c3c9a4e5a2923777ab25b155fe60ce56303b04ca02c2a47358b28a4e"} Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.061559 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kh86w"] Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.063726 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.072580 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh86w"] Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.160580 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-catalog-content\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.160626 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-utilities\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.160652 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2v87\" (UniqueName: \"kubernetes.io/projected/92641330-121c-4f1c-b8cf-71c0de5aacfd-kube-api-access-f2v87\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.262709 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-catalog-content\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.262789 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2v87\" (UniqueName: \"kubernetes.io/projected/92641330-121c-4f1c-b8cf-71c0de5aacfd-kube-api-access-f2v87\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.262815 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-utilities\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.263231 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-utilities\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.263471 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-catalog-content\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.282287 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-f2v87\" (UniqueName: \"kubernetes.io/projected/92641330-121c-4f1c-b8cf-71c0de5aacfd-kube-api-access-f2v87\") pod \"redhat-marketplace-kh86w\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.434912 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.887032 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh86w"] Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.928645 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qtv8" event={"ID":"473155f1-80ea-479a-b816-4369936a8d8b","Type":"ContainerStarted","Data":"75555cea1a2d11d8f5eec0a961b4b5acf5b532d84374e8f9d69c28ec867e1b41"} Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.930397 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh86w" event={"ID":"92641330-121c-4f1c-b8cf-71c0de5aacfd","Type":"ContainerStarted","Data":"cd4f19bfc4f428882e91ced81ad7ecd130f94038c9aa0b5012bcd30cbff1aada"} Nov 26 07:51:00 crc kubenswrapper[4940]: I1126 07:51:00.949847 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9qtv8" podStartSLOduration=2.527524434 podStartE2EDuration="3.949587425s" podCreationTimestamp="2025-11-26 07:50:57 +0000 UTC" firstStartedPulling="2025-11-26 07:50:58.906432851 +0000 UTC m=+3360.426574470" lastFinishedPulling="2025-11-26 07:51:00.328495842 +0000 UTC m=+3361.848637461" observedRunningTime="2025-11-26 07:51:00.944828375 +0000 UTC m=+3362.464970014" watchObservedRunningTime="2025-11-26 07:51:00.949587425 +0000 UTC m=+3362.469729054" Nov 26 07:51:01 crc kubenswrapper[4940]: I1126 07:51:01.939064 4940 generic.go:334] "Generic (PLEG): container finished" podID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerID="3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464" exitCode=0 Nov 26 07:51:01 crc kubenswrapper[4940]: I1126 07:51:01.939129 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh86w" event={"ID":"92641330-121c-4f1c-b8cf-71c0de5aacfd","Type":"ContainerDied","Data":"3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464"} Nov 26 07:51:03 crc kubenswrapper[4940]: I1126 07:51:03.988072 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh86w" event={"ID":"92641330-121c-4f1c-b8cf-71c0de5aacfd","Type":"ContainerStarted","Data":"dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e"} Nov 26 07:51:05 crc kubenswrapper[4940]: I1126 07:51:05.004207 4940 generic.go:334] "Generic (PLEG): container finished" podID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerID="dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e" exitCode=0 Nov 26 07:51:05 crc kubenswrapper[4940]: I1126 07:51:05.004309 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh86w" event={"ID":"92641330-121c-4f1c-b8cf-71c0de5aacfd","Type":"ContainerDied","Data":"dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e"} Nov 26 07:51:05 crc kubenswrapper[4940]: I1126 07:51:05.004578 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-kh86w" event={"ID":"92641330-121c-4f1c-b8cf-71c0de5aacfd","Type":"ContainerStarted","Data":"de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9"} Nov 26 07:51:05 crc kubenswrapper[4940]: I1126 07:51:05.034769 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kh86w" podStartSLOduration=2.440486748 podStartE2EDuration="5.034737276s" podCreationTimestamp="2025-11-26 07:51:00 +0000 UTC" firstStartedPulling="2025-11-26 07:51:01.941406974 +0000 UTC m=+3363.461548593" lastFinishedPulling="2025-11-26 07:51:04.535657492 +0000 UTC m=+3366.055799121" observedRunningTime="2025-11-26 07:51:05.02597167 +0000 UTC m=+3366.546113339" watchObservedRunningTime="2025-11-26 07:51:05.034737276 +0000 UTC m=+3366.554878925" Nov 26 07:51:07 crc kubenswrapper[4940]: I1126 07:51:07.624298 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:51:07 crc kubenswrapper[4940]: I1126 07:51:07.624690 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:51:07 crc kubenswrapper[4940]: I1126 07:51:07.695605 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:51:08 crc kubenswrapper[4940]: I1126 07:51:08.113674 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:51:08 crc kubenswrapper[4940]: I1126 07:51:08.867225 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9qtv8"] Nov 26 07:51:10 crc kubenswrapper[4940]: I1126 07:51:10.047662 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9qtv8" podUID="473155f1-80ea-479a-b816-4369936a8d8b" containerName="registry-server" containerID="cri-o://75555cea1a2d11d8f5eec0a961b4b5acf5b532d84374e8f9d69c28ec867e1b41" gracePeriod=2 Nov 26 07:51:10 crc kubenswrapper[4940]: I1126 07:51:10.435741 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:10 crc kubenswrapper[4940]: I1126 07:51:10.436870 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:10 crc kubenswrapper[4940]: I1126 07:51:10.502837 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.056319 4940 generic.go:334] "Generic (PLEG): container finished" podID="473155f1-80ea-479a-b816-4369936a8d8b" containerID="75555cea1a2d11d8f5eec0a961b4b5acf5b532d84374e8f9d69c28ec867e1b41" exitCode=0 Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.056408 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qtv8" event={"ID":"473155f1-80ea-479a-b816-4369936a8d8b","Type":"ContainerDied","Data":"75555cea1a2d11d8f5eec0a961b4b5acf5b532d84374e8f9d69c28ec867e1b41"} Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.108695 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.670089 4940 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.676375 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmrq9\" (UniqueName: \"kubernetes.io/projected/473155f1-80ea-479a-b816-4369936a8d8b-kube-api-access-mmrq9\") pod \"473155f1-80ea-479a-b816-4369936a8d8b\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.676484 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-catalog-content\") pod \"473155f1-80ea-479a-b816-4369936a8d8b\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.676511 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-utilities\") pod \"473155f1-80ea-479a-b816-4369936a8d8b\" (UID: \"473155f1-80ea-479a-b816-4369936a8d8b\") " Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.677537 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-utilities" (OuterVolumeSpecName: "utilities") pod "473155f1-80ea-479a-b816-4369936a8d8b" (UID: "473155f1-80ea-479a-b816-4369936a8d8b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.683438 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/473155f1-80ea-479a-b816-4369936a8d8b-kube-api-access-mmrq9" (OuterVolumeSpecName: "kube-api-access-mmrq9") pod "473155f1-80ea-479a-b816-4369936a8d8b" (UID: "473155f1-80ea-479a-b816-4369936a8d8b"). InnerVolumeSpecName "kube-api-access-mmrq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.726644 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "473155f1-80ea-479a-b816-4369936a8d8b" (UID: "473155f1-80ea-479a-b816-4369936a8d8b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.778780 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.778822 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmrq9\" (UniqueName: \"kubernetes.io/projected/473155f1-80ea-479a-b816-4369936a8d8b-kube-api-access-mmrq9\") on node \"crc\" DevicePath \"\"" Nov 26 07:51:11 crc kubenswrapper[4940]: I1126 07:51:11.778845 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/473155f1-80ea-479a-b816-4369936a8d8b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:51:12 crc kubenswrapper[4940]: I1126 07:51:12.080676 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qtv8" event={"ID":"473155f1-80ea-479a-b816-4369936a8d8b","Type":"ContainerDied","Data":"c74ff593d3374cf795d1efbfdef9c8bcc2929755afced9c0f3b9fb8a36184864"} Nov 26 07:51:12 crc kubenswrapper[4940]: I1126 07:51:12.080772 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9qtv8" Nov 26 07:51:12 crc kubenswrapper[4940]: I1126 07:51:12.080766 4940 scope.go:117] "RemoveContainer" containerID="75555cea1a2d11d8f5eec0a961b4b5acf5b532d84374e8f9d69c28ec867e1b41" Nov 26 07:51:12 crc kubenswrapper[4940]: I1126 07:51:12.132229 4940 scope.go:117] "RemoveContainer" containerID="39fb4d61c3c9a4e5a2923777ab25b155fe60ce56303b04ca02c2a47358b28a4e" Nov 26 07:51:12 crc kubenswrapper[4940]: I1126 07:51:12.155379 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9qtv8"] Nov 26 07:51:12 crc kubenswrapper[4940]: I1126 07:51:12.169178 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9qtv8"] Nov 26 07:51:12 crc kubenswrapper[4940]: I1126 07:51:12.188642 4940 scope.go:117] "RemoveContainer" containerID="0a49bd851d5b5d9e63a16f97c26099c482c5a0671c1105c8a85ae74aafa93bd9" Nov 26 07:51:12 crc kubenswrapper[4940]: I1126 07:51:12.656694 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh86w"] Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.090476 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kh86w" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerName="registry-server" containerID="cri-o://de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9" gracePeriod=2 Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.178834 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="473155f1-80ea-479a-b816-4369936a8d8b" path="/var/lib/kubelet/pods/473155f1-80ea-479a-b816-4369936a8d8b/volumes" Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.537593 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.610390 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-utilities\") pod \"92641330-121c-4f1c-b8cf-71c0de5aacfd\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.610439 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-catalog-content\") pod \"92641330-121c-4f1c-b8cf-71c0de5aacfd\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.610578 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2v87\" (UniqueName: \"kubernetes.io/projected/92641330-121c-4f1c-b8cf-71c0de5aacfd-kube-api-access-f2v87\") pod \"92641330-121c-4f1c-b8cf-71c0de5aacfd\" (UID: \"92641330-121c-4f1c-b8cf-71c0de5aacfd\") " Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.611374 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-utilities" (OuterVolumeSpecName: "utilities") pod "92641330-121c-4f1c-b8cf-71c0de5aacfd" (UID: "92641330-121c-4f1c-b8cf-71c0de5aacfd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.615392 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92641330-121c-4f1c-b8cf-71c0de5aacfd-kube-api-access-f2v87" (OuterVolumeSpecName: "kube-api-access-f2v87") pod "92641330-121c-4f1c-b8cf-71c0de5aacfd" (UID: "92641330-121c-4f1c-b8cf-71c0de5aacfd"). InnerVolumeSpecName "kube-api-access-f2v87". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.628354 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92641330-121c-4f1c-b8cf-71c0de5aacfd" (UID: "92641330-121c-4f1c-b8cf-71c0de5aacfd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.712334 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.712365 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92641330-121c-4f1c-b8cf-71c0de5aacfd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:51:13 crc kubenswrapper[4940]: I1126 07:51:13.712377 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2v87\" (UniqueName: \"kubernetes.io/projected/92641330-121c-4f1c-b8cf-71c0de5aacfd-kube-api-access-f2v87\") on node \"crc\" DevicePath \"\"" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.102407 4940 generic.go:334] "Generic (PLEG): container finished" podID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerID="de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9" exitCode=0 Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.102452 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh86w" event={"ID":"92641330-121c-4f1c-b8cf-71c0de5aacfd","Type":"ContainerDied","Data":"de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9"} Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.102481 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh86w" event={"ID":"92641330-121c-4f1c-b8cf-71c0de5aacfd","Type":"ContainerDied","Data":"cd4f19bfc4f428882e91ced81ad7ecd130f94038c9aa0b5012bcd30cbff1aada"} Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.102487 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kh86w" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.102500 4940 scope.go:117] "RemoveContainer" containerID="de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.123805 4940 scope.go:117] "RemoveContainer" containerID="dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.136350 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh86w"] Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.144728 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh86w"] Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.158291 4940 scope.go:117] "RemoveContainer" containerID="3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.175938 4940 scope.go:117] "RemoveContainer" containerID="de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9" Nov 26 07:51:14 crc kubenswrapper[4940]: E1126 07:51:14.176503 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9\": container with ID starting with de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9 not found: ID does not exist" containerID="de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.176667 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9"} err="failed to get container status \"de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9\": rpc error: code = NotFound desc = could not find container \"de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9\": container with ID starting with de2ee1a540ab310dc9106cadc0b40bf31937dc83c20847116a31494e8bda1eb9 not found: ID does not exist" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.176785 4940 scope.go:117] "RemoveContainer" containerID="dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e" Nov 26 07:51:14 crc kubenswrapper[4940]: E1126 07:51:14.177297 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e\": container with ID starting with dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e not found: ID does not exist" containerID="dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.177332 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e"} err="failed to get container status \"dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e\": rpc error: code = NotFound desc = could not find container \"dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e\": container with ID starting with dfbfc048b059538d6b49d390424f0d637ac0fd75d7b5a3a7e19381a5d541839e not found: ID does not exist" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.177359 4940 scope.go:117] "RemoveContainer" 
containerID="3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464" Nov 26 07:51:14 crc kubenswrapper[4940]: E1126 07:51:14.177623 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464\": container with ID starting with 3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464 not found: ID does not exist" containerID="3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464" Nov 26 07:51:14 crc kubenswrapper[4940]: I1126 07:51:14.177646 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464"} err="failed to get container status \"3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464\": rpc error: code = NotFound desc = could not find container \"3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464\": container with ID starting with 3e8ffaac5ff4a56f0224a93ba634c26f8275fdab47de67b16e8ba1fbfcf25464 not found: ID does not exist" Nov 26 07:51:15 crc kubenswrapper[4940]: I1126 07:51:15.175801 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" path="/var/lib/kubelet/pods/92641330-121c-4f1c-b8cf-71c0de5aacfd/volumes" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.107836 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2b9bn"] Nov 26 07:51:47 crc kubenswrapper[4940]: E1126 07:51:47.109812 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="473155f1-80ea-479a-b816-4369936a8d8b" containerName="extract-utilities" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.109848 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="473155f1-80ea-479a-b816-4369936a8d8b" containerName="extract-utilities" Nov 26 07:51:47 crc kubenswrapper[4940]: E1126 07:51:47.109879 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerName="extract-utilities" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.109903 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerName="extract-utilities" Nov 26 07:51:47 crc kubenswrapper[4940]: E1126 07:51:47.109928 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerName="extract-content" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.109946 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerName="extract-content" Nov 26 07:51:47 crc kubenswrapper[4940]: E1126 07:51:47.109992 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="473155f1-80ea-479a-b816-4369936a8d8b" containerName="extract-content" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.110006 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="473155f1-80ea-479a-b816-4369936a8d8b" containerName="extract-content" Nov 26 07:51:47 crc kubenswrapper[4940]: E1126 07:51:47.110072 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerName="registry-server" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.110089 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerName="registry-server" Nov 26 
07:51:47 crc kubenswrapper[4940]: E1126 07:51:47.110125 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="473155f1-80ea-479a-b816-4369936a8d8b" containerName="registry-server" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.110141 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="473155f1-80ea-479a-b816-4369936a8d8b" containerName="registry-server" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.110409 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="92641330-121c-4f1c-b8cf-71c0de5aacfd" containerName="registry-server" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.110455 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="473155f1-80ea-479a-b816-4369936a8d8b" containerName="registry-server" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.113442 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.124634 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2b9bn"] Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.160772 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-utilities\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.160833 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwvkz\" (UniqueName: \"kubernetes.io/projected/5006bf4d-8134-43d0-a8bd-682c090e4f08-kube-api-access-kwvkz\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.160913 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-catalog-content\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.261870 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwvkz\" (UniqueName: \"kubernetes.io/projected/5006bf4d-8134-43d0-a8bd-682c090e4f08-kube-api-access-kwvkz\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.261953 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-catalog-content\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.262084 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-utilities\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " 
pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.262534 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-utilities\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.262534 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-catalog-content\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.289274 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwvkz\" (UniqueName: \"kubernetes.io/projected/5006bf4d-8134-43d0-a8bd-682c090e4f08-kube-api-access-kwvkz\") pod \"redhat-operators-2b9bn\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.440801 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:47 crc kubenswrapper[4940]: I1126 07:51:47.709429 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2b9bn"] Nov 26 07:51:48 crc kubenswrapper[4940]: I1126 07:51:48.431549 4940 generic.go:334] "Generic (PLEG): container finished" podID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerID="88f848428758481171546bdefb9957911eb33305573bd46a34d218412c25e0cf" exitCode=0 Nov 26 07:51:48 crc kubenswrapper[4940]: I1126 07:51:48.431713 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2b9bn" event={"ID":"5006bf4d-8134-43d0-a8bd-682c090e4f08","Type":"ContainerDied","Data":"88f848428758481171546bdefb9957911eb33305573bd46a34d218412c25e0cf"} Nov 26 07:51:48 crc kubenswrapper[4940]: I1126 07:51:48.431931 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2b9bn" event={"ID":"5006bf4d-8134-43d0-a8bd-682c090e4f08","Type":"ContainerStarted","Data":"9e44b36ff2182515071e20dbafab8136bd73fe0851ee45744ec9135a44ac300f"} Nov 26 07:51:49 crc kubenswrapper[4940]: I1126 07:51:49.441128 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2b9bn" event={"ID":"5006bf4d-8134-43d0-a8bd-682c090e4f08","Type":"ContainerStarted","Data":"39653e0cb8d8383899c60ac6a55cd8e907fe841a3c816c30f58acb1488ba475b"} Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.094920 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-px4tw"] Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.097378 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.135803 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-px4tw"] Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.208988 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgdpg\" (UniqueName: \"kubernetes.io/projected/3bfead87-e642-40e4-acea-c0963637ddef-kube-api-access-pgdpg\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.209072 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-utilities\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.209119 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-catalog-content\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.310638 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgdpg\" (UniqueName: \"kubernetes.io/projected/3bfead87-e642-40e4-acea-c0963637ddef-kube-api-access-pgdpg\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.310696 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-utilities\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.310738 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-catalog-content\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.311235 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-catalog-content\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.311642 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-utilities\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.335807 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pgdpg\" (UniqueName: \"kubernetes.io/projected/3bfead87-e642-40e4-acea-c0963637ddef-kube-api-access-pgdpg\") pod \"community-operators-px4tw\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.443912 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.450054 4940 generic.go:334] "Generic (PLEG): container finished" podID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerID="39653e0cb8d8383899c60ac6a55cd8e907fe841a3c816c30f58acb1488ba475b" exitCode=0 Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.450095 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2b9bn" event={"ID":"5006bf4d-8134-43d0-a8bd-682c090e4f08","Type":"ContainerDied","Data":"39653e0cb8d8383899c60ac6a55cd8e907fe841a3c816c30f58acb1488ba475b"} Nov 26 07:51:50 crc kubenswrapper[4940]: I1126 07:51:50.714733 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-px4tw"] Nov 26 07:51:51 crc kubenswrapper[4940]: I1126 07:51:51.460000 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2b9bn" event={"ID":"5006bf4d-8134-43d0-a8bd-682c090e4f08","Type":"ContainerStarted","Data":"2072a55399933dc065d3c98eba0003ac6c6b3670d81e75b792d9817fe6406a3f"} Nov 26 07:51:51 crc kubenswrapper[4940]: I1126 07:51:51.461787 4940 generic.go:334] "Generic (PLEG): container finished" podID="3bfead87-e642-40e4-acea-c0963637ddef" containerID="c4e729d50dc603e7cd7e42450117af2061ff0373a2692dcd7f6f9cc80c05ed3e" exitCode=0 Nov 26 07:51:51 crc kubenswrapper[4940]: I1126 07:51:51.461837 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-px4tw" event={"ID":"3bfead87-e642-40e4-acea-c0963637ddef","Type":"ContainerDied","Data":"c4e729d50dc603e7cd7e42450117af2061ff0373a2692dcd7f6f9cc80c05ed3e"} Nov 26 07:51:51 crc kubenswrapper[4940]: I1126 07:51:51.461860 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-px4tw" event={"ID":"3bfead87-e642-40e4-acea-c0963637ddef","Type":"ContainerStarted","Data":"c28d2f6ea33cab3377c6550d587b4e87641ba24d5950d872344c2a0532dd9245"} Nov 26 07:51:51 crc kubenswrapper[4940]: I1126 07:51:51.482959 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2b9bn" podStartSLOduration=2.000959656 podStartE2EDuration="4.482941542s" podCreationTimestamp="2025-11-26 07:51:47 +0000 UTC" firstStartedPulling="2025-11-26 07:51:48.433370501 +0000 UTC m=+3409.953512120" lastFinishedPulling="2025-11-26 07:51:50.915352387 +0000 UTC m=+3412.435494006" observedRunningTime="2025-11-26 07:51:51.481425544 +0000 UTC m=+3413.001567183" watchObservedRunningTime="2025-11-26 07:51:51.482941542 +0000 UTC m=+3413.003083171" Nov 26 07:51:52 crc kubenswrapper[4940]: I1126 07:51:52.472183 4940 generic.go:334] "Generic (PLEG): container finished" podID="3bfead87-e642-40e4-acea-c0963637ddef" containerID="1356d539603ccd81d8929533779518ccc7b9342de775ae78b80ad101234c327f" exitCode=0 Nov 26 07:51:52 crc kubenswrapper[4940]: I1126 07:51:52.472295 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-px4tw" 
event={"ID":"3bfead87-e642-40e4-acea-c0963637ddef","Type":"ContainerDied","Data":"1356d539603ccd81d8929533779518ccc7b9342de775ae78b80ad101234c327f"} Nov 26 07:51:53 crc kubenswrapper[4940]: I1126 07:51:53.481373 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-px4tw" event={"ID":"3bfead87-e642-40e4-acea-c0963637ddef","Type":"ContainerStarted","Data":"5c0ea32ee1e25411da05c7770d232721329550a793fc9d463e1ccf5b8ecdb49a"} Nov 26 07:51:53 crc kubenswrapper[4940]: I1126 07:51:53.508247 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-px4tw" podStartSLOduration=1.9927662069999998 podStartE2EDuration="3.508220753s" podCreationTimestamp="2025-11-26 07:51:50 +0000 UTC" firstStartedPulling="2025-11-26 07:51:51.463870061 +0000 UTC m=+3412.984011680" lastFinishedPulling="2025-11-26 07:51:52.979324597 +0000 UTC m=+3414.499466226" observedRunningTime="2025-11-26 07:51:53.498149145 +0000 UTC m=+3415.018290804" watchObservedRunningTime="2025-11-26 07:51:53.508220753 +0000 UTC m=+3415.028362382" Nov 26 07:51:57 crc kubenswrapper[4940]: I1126 07:51:57.441532 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:57 crc kubenswrapper[4940]: I1126 07:51:57.441908 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:57 crc kubenswrapper[4940]: I1126 07:51:57.496008 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:57 crc kubenswrapper[4940]: I1126 07:51:57.567001 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:51:57 crc kubenswrapper[4940]: I1126 07:51:57.882338 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2b9bn"] Nov 26 07:51:59 crc kubenswrapper[4940]: I1126 07:51:59.526144 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2b9bn" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerName="registry-server" containerID="cri-o://2072a55399933dc065d3c98eba0003ac6c6b3670d81e75b792d9817fe6406a3f" gracePeriod=2 Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.444627 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.445097 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.493090 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.541070 4940 generic.go:334] "Generic (PLEG): container finished" podID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerID="2072a55399933dc065d3c98eba0003ac6c6b3670d81e75b792d9817fe6406a3f" exitCode=0 Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.541151 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2b9bn" event={"ID":"5006bf4d-8134-43d0-a8bd-682c090e4f08","Type":"ContainerDied","Data":"2072a55399933dc065d3c98eba0003ac6c6b3670d81e75b792d9817fe6406a3f"} Nov 26 
07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.608387 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.764182 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.864561 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwvkz\" (UniqueName: \"kubernetes.io/projected/5006bf4d-8134-43d0-a8bd-682c090e4f08-kube-api-access-kwvkz\") pod \"5006bf4d-8134-43d0-a8bd-682c090e4f08\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.864624 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-utilities\") pod \"5006bf4d-8134-43d0-a8bd-682c090e4f08\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.864802 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-catalog-content\") pod \"5006bf4d-8134-43d0-a8bd-682c090e4f08\" (UID: \"5006bf4d-8134-43d0-a8bd-682c090e4f08\") " Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.865962 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-utilities" (OuterVolumeSpecName: "utilities") pod "5006bf4d-8134-43d0-a8bd-682c090e4f08" (UID: "5006bf4d-8134-43d0-a8bd-682c090e4f08"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.870968 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5006bf4d-8134-43d0-a8bd-682c090e4f08-kube-api-access-kwvkz" (OuterVolumeSpecName: "kube-api-access-kwvkz") pod "5006bf4d-8134-43d0-a8bd-682c090e4f08" (UID: "5006bf4d-8134-43d0-a8bd-682c090e4f08"). InnerVolumeSpecName "kube-api-access-kwvkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.966306 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwvkz\" (UniqueName: \"kubernetes.io/projected/5006bf4d-8134-43d0-a8bd-682c090e4f08-kube-api-access-kwvkz\") on node \"crc\" DevicePath \"\"" Nov 26 07:52:00 crc kubenswrapper[4940]: I1126 07:52:00.966579 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:52:01 crc kubenswrapper[4940]: I1126 07:52:01.549909 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2b9bn" event={"ID":"5006bf4d-8134-43d0-a8bd-682c090e4f08","Type":"ContainerDied","Data":"9e44b36ff2182515071e20dbafab8136bd73fe0851ee45744ec9135a44ac300f"} Nov 26 07:52:01 crc kubenswrapper[4940]: I1126 07:52:01.549947 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2b9bn" Nov 26 07:52:01 crc kubenswrapper[4940]: I1126 07:52:01.549993 4940 scope.go:117] "RemoveContainer" containerID="2072a55399933dc065d3c98eba0003ac6c6b3670d81e75b792d9817fe6406a3f" Nov 26 07:52:01 crc kubenswrapper[4940]: I1126 07:52:01.568943 4940 scope.go:117] "RemoveContainer" containerID="39653e0cb8d8383899c60ac6a55cd8e907fe841a3c816c30f58acb1488ba475b" Nov 26 07:52:01 crc kubenswrapper[4940]: I1126 07:52:01.586269 4940 scope.go:117] "RemoveContainer" containerID="88f848428758481171546bdefb9957911eb33305573bd46a34d218412c25e0cf" Nov 26 07:52:01 crc kubenswrapper[4940]: I1126 07:52:01.685482 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-px4tw"] Nov 26 07:52:02 crc kubenswrapper[4940]: I1126 07:52:02.559583 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-px4tw" podUID="3bfead87-e642-40e4-acea-c0963637ddef" containerName="registry-server" containerID="cri-o://5c0ea32ee1e25411da05c7770d232721329550a793fc9d463e1ccf5b8ecdb49a" gracePeriod=2 Nov 26 07:52:04 crc kubenswrapper[4940]: I1126 07:52:04.171030 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5006bf4d-8134-43d0-a8bd-682c090e4f08" (UID: "5006bf4d-8134-43d0-a8bd-682c090e4f08"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:52:04 crc kubenswrapper[4940]: I1126 07:52:04.214188 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5006bf4d-8134-43d0-a8bd-682c090e4f08-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:52:04 crc kubenswrapper[4940]: I1126 07:52:04.303123 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2b9bn"] Nov 26 07:52:04 crc kubenswrapper[4940]: I1126 07:52:04.309425 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2b9bn"] Nov 26 07:52:04 crc kubenswrapper[4940]: I1126 07:52:04.586410 4940 generic.go:334] "Generic (PLEG): container finished" podID="3bfead87-e642-40e4-acea-c0963637ddef" containerID="5c0ea32ee1e25411da05c7770d232721329550a793fc9d463e1ccf5b8ecdb49a" exitCode=0 Nov 26 07:52:04 crc kubenswrapper[4940]: I1126 07:52:04.586504 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-px4tw" event={"ID":"3bfead87-e642-40e4-acea-c0963637ddef","Type":"ContainerDied","Data":"5c0ea32ee1e25411da05c7770d232721329550a793fc9d463e1ccf5b8ecdb49a"} Nov 26 07:52:05 crc kubenswrapper[4940]: I1126 07:52:05.178335 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" path="/var/lib/kubelet/pods/5006bf4d-8134-43d0-a8bd-682c090e4f08/volumes" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.170312 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.255020 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-utilities\") pod \"3bfead87-e642-40e4-acea-c0963637ddef\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.255168 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgdpg\" (UniqueName: \"kubernetes.io/projected/3bfead87-e642-40e4-acea-c0963637ddef-kube-api-access-pgdpg\") pod \"3bfead87-e642-40e4-acea-c0963637ddef\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.255329 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-catalog-content\") pod \"3bfead87-e642-40e4-acea-c0963637ddef\" (UID: \"3bfead87-e642-40e4-acea-c0963637ddef\") " Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.255956 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-utilities" (OuterVolumeSpecName: "utilities") pod "3bfead87-e642-40e4-acea-c0963637ddef" (UID: "3bfead87-e642-40e4-acea-c0963637ddef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.263290 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bfead87-e642-40e4-acea-c0963637ddef-kube-api-access-pgdpg" (OuterVolumeSpecName: "kube-api-access-pgdpg") pod "3bfead87-e642-40e4-acea-c0963637ddef" (UID: "3bfead87-e642-40e4-acea-c0963637ddef"). InnerVolumeSpecName "kube-api-access-pgdpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.334151 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3bfead87-e642-40e4-acea-c0963637ddef" (UID: "3bfead87-e642-40e4-acea-c0963637ddef"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.358225 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.358276 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bfead87-e642-40e4-acea-c0963637ddef-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.358297 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgdpg\" (UniqueName: \"kubernetes.io/projected/3bfead87-e642-40e4-acea-c0963637ddef-kube-api-access-pgdpg\") on node \"crc\" DevicePath \"\"" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.609821 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-px4tw" event={"ID":"3bfead87-e642-40e4-acea-c0963637ddef","Type":"ContainerDied","Data":"c28d2f6ea33cab3377c6550d587b4e87641ba24d5950d872344c2a0532dd9245"} Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.610202 4940 scope.go:117] "RemoveContainer" containerID="5c0ea32ee1e25411da05c7770d232721329550a793fc9d463e1ccf5b8ecdb49a" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.610074 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-px4tw" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.629636 4940 scope.go:117] "RemoveContainer" containerID="1356d539603ccd81d8929533779518ccc7b9342de775ae78b80ad101234c327f" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.673598 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-px4tw"] Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.679157 4940 scope.go:117] "RemoveContainer" containerID="c4e729d50dc603e7cd7e42450117af2061ff0373a2692dcd7f6f9cc80c05ed3e" Nov 26 07:52:06 crc kubenswrapper[4940]: I1126 07:52:06.681302 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-px4tw"] Nov 26 07:52:07 crc kubenswrapper[4940]: I1126 07:52:07.196564 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bfead87-e642-40e4-acea-c0963637ddef" path="/var/lib/kubelet/pods/3bfead87-e642-40e4-acea-c0963637ddef/volumes" Nov 26 07:53:21 crc kubenswrapper[4940]: I1126 07:53:21.728521 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:53:21 crc kubenswrapper[4940]: I1126 07:53:21.729153 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:53:51 crc kubenswrapper[4940]: I1126 07:53:51.728900 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:53:51 crc kubenswrapper[4940]: I1126 07:53:51.731357 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:54:21 crc kubenswrapper[4940]: I1126 07:54:21.728207 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:54:21 crc kubenswrapper[4940]: I1126 07:54:21.728790 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:54:21 crc kubenswrapper[4940]: I1126 07:54:21.728851 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 07:54:21 crc kubenswrapper[4940]: I1126 07:54:21.729486 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"91d08a9c993615f97c26c986c131062c06d821e8ea5287973d01a577d1d0fabe"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:54:21 crc kubenswrapper[4940]: I1126 07:54:21.729550 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://91d08a9c993615f97c26c986c131062c06d821e8ea5287973d01a577d1d0fabe" gracePeriod=600 Nov 26 07:54:22 crc kubenswrapper[4940]: I1126 07:54:22.846301 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="91d08a9c993615f97c26c986c131062c06d821e8ea5287973d01a577d1d0fabe" exitCode=0 Nov 26 07:54:22 crc kubenswrapper[4940]: I1126 07:54:22.846350 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"91d08a9c993615f97c26c986c131062c06d821e8ea5287973d01a577d1d0fabe"} Nov 26 07:54:22 crc kubenswrapper[4940]: I1126 07:54:22.846635 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"} Nov 26 07:54:22 crc kubenswrapper[4940]: I1126 07:54:22.846662 4940 scope.go:117] "RemoveContainer" containerID="b3b074f12b88e4ad5af5bd799ed030d85cfd7fa912d145a2b2257a1cde1eb356" Nov 26 07:56:51 crc kubenswrapper[4940]: I1126 07:56:51.728960 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon 
Nov 26 07:56:51 crc kubenswrapper[4940]: I1126 07:56:51.729574 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:57:21 crc kubenswrapper[4940]: I1126 07:57:21.728432 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:57:21 crc kubenswrapper[4940]: I1126 07:57:21.729263 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:57:51 crc kubenswrapper[4940]: I1126 07:57:51.728428 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:57:51 crc kubenswrapper[4940]: I1126 07:57:51.729130 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:57:51 crc kubenswrapper[4940]: I1126 07:57:51.729199 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 07:57:51 crc kubenswrapper[4940]: I1126 07:57:51.730132 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 07:57:51 crc kubenswrapper[4940]: I1126 07:57:51.730234 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" gracePeriod=600
Nov 26 07:57:51 crc kubenswrapper[4940]: E1126 07:57:51.875999 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:57:52 crc kubenswrapper[4940]: I1126 07:57:52.666778 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" exitCode=0
Nov 26 07:57:52 crc kubenswrapper[4940]: I1126 07:57:52.666824 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"}
Nov 26 07:57:52 crc kubenswrapper[4940]: I1126 07:57:52.666863 4940 scope.go:117] "RemoveContainer" containerID="91d08a9c993615f97c26c986c131062c06d821e8ea5287973d01a577d1d0fabe"
Nov 26 07:57:52 crc kubenswrapper[4940]: I1126 07:57:52.667405 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:57:52 crc kubenswrapper[4940]: E1126 07:57:52.667642 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:58:06 crc kubenswrapper[4940]: I1126 07:58:06.165570 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:58:06 crc kubenswrapper[4940]: E1126 07:58:06.166396 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:58:18 crc kubenswrapper[4940]: I1126 07:58:18.166834 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:58:18 crc kubenswrapper[4940]: E1126 07:58:18.167705 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:58:30 crc kubenswrapper[4940]: I1126 07:58:30.165600 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:58:30 crc kubenswrapper[4940]: E1126 07:58:30.166430 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:58:43 crc kubenswrapper[4940]: I1126 07:58:43.165449 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:58:43 crc kubenswrapper[4940]: E1126 07:58:43.166385 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:58:55 crc kubenswrapper[4940]: I1126 07:58:55.165313 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:58:55 crc kubenswrapper[4940]: E1126 07:58:55.166092 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:59:10 crc kubenswrapper[4940]: I1126 07:59:10.165555 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:59:10 crc kubenswrapper[4940]: E1126 07:59:10.166450 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:59:25 crc kubenswrapper[4940]: I1126 07:59:25.165824 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:59:25 crc kubenswrapper[4940]: E1126 07:59:25.166952 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:59:36 crc kubenswrapper[4940]: I1126 07:59:36.165451 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:59:36 crc kubenswrapper[4940]: E1126 07:59:36.166053 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 07:59:47 crc kubenswrapper[4940]: I1126 07:59:47.165715 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95"
Nov 26 07:59:47 crc kubenswrapper[4940]: E1126 07:59:47.166653 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.154409 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf"]
Nov 26 08:00:00 crc kubenswrapper[4940]: E1126 08:00:00.156333 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bfead87-e642-40e4-acea-c0963637ddef" containerName="registry-server"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.156446 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bfead87-e642-40e4-acea-c0963637ddef" containerName="registry-server"
Nov 26 08:00:00 crc kubenswrapper[4940]: E1126 08:00:00.156533 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerName="extract-content"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.156606 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerName="extract-content"
Nov 26 08:00:00 crc kubenswrapper[4940]: E1126 08:00:00.156686 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bfead87-e642-40e4-acea-c0963637ddef" containerName="extract-content"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.156756 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bfead87-e642-40e4-acea-c0963637ddef" containerName="extract-content"
Nov 26 08:00:00 crc kubenswrapper[4940]: E1126 08:00:00.156838 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerName="registry-server"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.156915 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerName="registry-server"
Nov 26 08:00:00 crc kubenswrapper[4940]: E1126 08:00:00.156999 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerName="extract-utilities"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.157130 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerName="extract-utilities"
Nov 26 08:00:00 crc kubenswrapper[4940]: E1126 08:00:00.157260 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bfead87-e642-40e4-acea-c0963637ddef" containerName="extract-utilities"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.157334 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bfead87-e642-40e4-acea-c0963637ddef" containerName="extract-utilities"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.157582 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5006bf4d-8134-43d0-a8bd-682c090e4f08" containerName="registry-server"
Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.157680 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bfead87-e642-40e4-acea-c0963637ddef" containerName="registry-server"
containerName="registry-server" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.158359 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.160698 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.162162 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf"] Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.164623 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.358567 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6885546-c253-4072-9973-596e65bcf799-secret-volume\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.358604 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6885546-c253-4072-9973-596e65bcf799-config-volume\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.358637 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6xxg\" (UniqueName: \"kubernetes.io/projected/c6885546-c253-4072-9973-596e65bcf799-kube-api-access-j6xxg\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.460185 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6885546-c253-4072-9973-596e65bcf799-secret-volume\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.460629 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6885546-c253-4072-9973-596e65bcf799-config-volume\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.460992 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6xxg\" (UniqueName: \"kubernetes.io/projected/c6885546-c253-4072-9973-596e65bcf799-kube-api-access-j6xxg\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.462395 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6885546-c253-4072-9973-596e65bcf799-config-volume\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.482553 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6xxg\" (UniqueName: \"kubernetes.io/projected/c6885546-c253-4072-9973-596e65bcf799-kube-api-access-j6xxg\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.482705 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6885546-c253-4072-9973-596e65bcf799-secret-volume\") pod \"collect-profiles-29402400-xx2qf\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:00 crc kubenswrapper[4940]: I1126 08:00:00.776984 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:01 crc kubenswrapper[4940]: I1126 08:00:01.165474 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:00:01 crc kubenswrapper[4940]: E1126 08:00:01.166069 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:00:01 crc kubenswrapper[4940]: I1126 08:00:01.187188 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf"] Nov 26 08:00:01 crc kubenswrapper[4940]: I1126 08:00:01.693073 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" event={"ID":"c6885546-c253-4072-9973-596e65bcf799","Type":"ContainerStarted","Data":"690ed756d5414940cba6c1b633b54b6fd1ca96e5eaade57510e396d6c720ddf5"} Nov 26 08:00:01 crc kubenswrapper[4940]: I1126 08:00:01.693112 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" event={"ID":"c6885546-c253-4072-9973-596e65bcf799","Type":"ContainerStarted","Data":"e1ee1e590ee92af4ed35adc0d30ea7313c4cac4a993549c2c0e3e6208f7d73c4"} Nov 26 08:00:01 crc kubenswrapper[4940]: I1126 08:00:01.713077 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" podStartSLOduration=1.713055139 podStartE2EDuration="1.713055139s" podCreationTimestamp="2025-11-26 08:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:00:01.708383632 +0000 UTC m=+3903.228525251" watchObservedRunningTime="2025-11-26 08:00:01.713055139 +0000 UTC m=+3903.233196758" Nov 26 08:00:02 crc 
kubenswrapper[4940]: I1126 08:00:02.701907 4940 generic.go:334] "Generic (PLEG): container finished" podID="c6885546-c253-4072-9973-596e65bcf799" containerID="690ed756d5414940cba6c1b633b54b6fd1ca96e5eaade57510e396d6c720ddf5" exitCode=0 Nov 26 08:00:02 crc kubenswrapper[4940]: I1126 08:00:02.701949 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" event={"ID":"c6885546-c253-4072-9973-596e65bcf799","Type":"ContainerDied","Data":"690ed756d5414940cba6c1b633b54b6fd1ca96e5eaade57510e396d6c720ddf5"} Nov 26 08:00:03 crc kubenswrapper[4940]: I1126 08:00:03.978755 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.125012 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6885546-c253-4072-9973-596e65bcf799-config-volume\") pod \"c6885546-c253-4072-9973-596e65bcf799\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.125102 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6xxg\" (UniqueName: \"kubernetes.io/projected/c6885546-c253-4072-9973-596e65bcf799-kube-api-access-j6xxg\") pod \"c6885546-c253-4072-9973-596e65bcf799\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.125157 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6885546-c253-4072-9973-596e65bcf799-secret-volume\") pod \"c6885546-c253-4072-9973-596e65bcf799\" (UID: \"c6885546-c253-4072-9973-596e65bcf799\") " Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.125579 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6885546-c253-4072-9973-596e65bcf799-config-volume" (OuterVolumeSpecName: "config-volume") pod "c6885546-c253-4072-9973-596e65bcf799" (UID: "c6885546-c253-4072-9973-596e65bcf799"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.130900 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6885546-c253-4072-9973-596e65bcf799-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c6885546-c253-4072-9973-596e65bcf799" (UID: "c6885546-c253-4072-9973-596e65bcf799"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.131183 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6885546-c253-4072-9973-596e65bcf799-kube-api-access-j6xxg" (OuterVolumeSpecName: "kube-api-access-j6xxg") pod "c6885546-c253-4072-9973-596e65bcf799" (UID: "c6885546-c253-4072-9973-596e65bcf799"). InnerVolumeSpecName "kube-api-access-j6xxg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.227171 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c6885546-c253-4072-9973-596e65bcf799-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.227216 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6xxg\" (UniqueName: \"kubernetes.io/projected/c6885546-c253-4072-9973-596e65bcf799-kube-api-access-j6xxg\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.227233 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c6885546-c253-4072-9973-596e65bcf799-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.242201 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99"] Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.249581 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-qvm99"] Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.717270 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" event={"ID":"c6885546-c253-4072-9973-596e65bcf799","Type":"ContainerDied","Data":"e1ee1e590ee92af4ed35adc0d30ea7313c4cac4a993549c2c0e3e6208f7d73c4"} Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.717312 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1ee1e590ee92af4ed35adc0d30ea7313c4cac4a993549c2c0e3e6208f7d73c4" Nov 26 08:00:04 crc kubenswrapper[4940]: I1126 08:00:04.717367 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf" Nov 26 08:00:05 crc kubenswrapper[4940]: I1126 08:00:05.176068 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38381651-2c1c-4c63-8a3c-122d6cd39737" path="/var/lib/kubelet/pods/38381651-2c1c-4c63-8a3c-122d6cd39737/volumes" Nov 26 08:00:08 crc kubenswrapper[4940]: I1126 08:00:08.425647 4940 scope.go:117] "RemoveContainer" containerID="49eeee8f895af4609ba9d4c461f6a1836b57ee1532776d3a678ee604d4f405d9" Nov 26 08:00:15 crc kubenswrapper[4940]: I1126 08:00:15.165067 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:00:15 crc kubenswrapper[4940]: E1126 08:00:15.165746 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:00:30 crc kubenswrapper[4940]: I1126 08:00:30.166143 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:00:30 crc kubenswrapper[4940]: E1126 08:00:30.167186 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:00:45 crc kubenswrapper[4940]: I1126 08:00:45.166100 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:00:45 crc kubenswrapper[4940]: E1126 08:00:45.166850 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:00:59 crc kubenswrapper[4940]: I1126 08:00:59.171008 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:00:59 crc kubenswrapper[4940]: E1126 08:00:59.171675 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.665191 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-clwk5"] Nov 26 08:01:03 crc kubenswrapper[4940]: E1126 08:01:03.666190 4940 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c6885546-c253-4072-9973-596e65bcf799" containerName="collect-profiles" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.666211 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6885546-c253-4072-9973-596e65bcf799" containerName="collect-profiles" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.666487 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6885546-c253-4072-9973-596e65bcf799" containerName="collect-profiles" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.668233 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.686708 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwk5"] Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.759978 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-utilities\") pod \"redhat-marketplace-clwk5\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.760130 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh2bz\" (UniqueName: \"kubernetes.io/projected/432bf507-464f-4af5-91d0-dab73f5a2b1c-kube-api-access-kh2bz\") pod \"redhat-marketplace-clwk5\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.760155 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-catalog-content\") pod \"redhat-marketplace-clwk5\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.861870 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-utilities\") pod \"redhat-marketplace-clwk5\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.862002 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh2bz\" (UniqueName: \"kubernetes.io/projected/432bf507-464f-4af5-91d0-dab73f5a2b1c-kube-api-access-kh2bz\") pod \"redhat-marketplace-clwk5\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.862027 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-catalog-content\") pod \"redhat-marketplace-clwk5\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.862427 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-utilities\") pod \"redhat-marketplace-clwk5\" (UID: 
\"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.862481 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-catalog-content\") pod \"redhat-marketplace-clwk5\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:03 crc kubenswrapper[4940]: I1126 08:01:03.893714 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh2bz\" (UniqueName: \"kubernetes.io/projected/432bf507-464f-4af5-91d0-dab73f5a2b1c-kube-api-access-kh2bz\") pod \"redhat-marketplace-clwk5\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:04 crc kubenswrapper[4940]: I1126 08:01:04.001559 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:04 crc kubenswrapper[4940]: I1126 08:01:04.396296 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwk5"] Nov 26 08:01:04 crc kubenswrapper[4940]: W1126 08:01:04.401513 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod432bf507_464f_4af5_91d0_dab73f5a2b1c.slice/crio-9f089e01dbe9e521a166a48f8405953c2956aa39d4cba74df00fdeee28861651 WatchSource:0}: Error finding container 9f089e01dbe9e521a166a48f8405953c2956aa39d4cba74df00fdeee28861651: Status 404 returned error can't find the container with id 9f089e01dbe9e521a166a48f8405953c2956aa39d4cba74df00fdeee28861651 Nov 26 08:01:05 crc kubenswrapper[4940]: I1126 08:01:05.156113 4940 generic.go:334] "Generic (PLEG): container finished" podID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerID="e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e" exitCode=0 Nov 26 08:01:05 crc kubenswrapper[4940]: I1126 08:01:05.156157 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwk5" event={"ID":"432bf507-464f-4af5-91d0-dab73f5a2b1c","Type":"ContainerDied","Data":"e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e"} Nov 26 08:01:05 crc kubenswrapper[4940]: I1126 08:01:05.156344 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwk5" event={"ID":"432bf507-464f-4af5-91d0-dab73f5a2b1c","Type":"ContainerStarted","Data":"9f089e01dbe9e521a166a48f8405953c2956aa39d4cba74df00fdeee28861651"} Nov 26 08:01:05 crc kubenswrapper[4940]: I1126 08:01:05.157930 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.662204 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zhl5h"] Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.664297 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.669118 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zhl5h"] Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.803318 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-utilities\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.803403 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gk6pf\" (UniqueName: \"kubernetes.io/projected/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-kube-api-access-gk6pf\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.803464 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-catalog-content\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.904623 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-utilities\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.904680 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gk6pf\" (UniqueName: \"kubernetes.io/projected/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-kube-api-access-gk6pf\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.904720 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-catalog-content\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.905307 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-catalog-content\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.905440 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-utilities\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.928314 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gk6pf\" (UniqueName: \"kubernetes.io/projected/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-kube-api-access-gk6pf\") pod \"certified-operators-zhl5h\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:06 crc kubenswrapper[4940]: I1126 08:01:06.985986 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:07 crc kubenswrapper[4940]: I1126 08:01:07.182699 4940 generic.go:334] "Generic (PLEG): container finished" podID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerID="5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e" exitCode=0 Nov 26 08:01:07 crc kubenswrapper[4940]: I1126 08:01:07.182735 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwk5" event={"ID":"432bf507-464f-4af5-91d0-dab73f5a2b1c","Type":"ContainerDied","Data":"5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e"} Nov 26 08:01:07 crc kubenswrapper[4940]: I1126 08:01:07.443720 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zhl5h"] Nov 26 08:01:08 crc kubenswrapper[4940]: I1126 08:01:08.194490 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwk5" event={"ID":"432bf507-464f-4af5-91d0-dab73f5a2b1c","Type":"ContainerStarted","Data":"bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8"} Nov 26 08:01:08 crc kubenswrapper[4940]: I1126 08:01:08.197172 4940 generic.go:334] "Generic (PLEG): container finished" podID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerID="ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995" exitCode=0 Nov 26 08:01:08 crc kubenswrapper[4940]: I1126 08:01:08.197213 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhl5h" event={"ID":"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a","Type":"ContainerDied","Data":"ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995"} Nov 26 08:01:08 crc kubenswrapper[4940]: I1126 08:01:08.197237 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhl5h" event={"ID":"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a","Type":"ContainerStarted","Data":"9298b7e87a37d9c435356ed0ca21cd1cd6cc00652ba5b651eaf64ade939c5efd"} Nov 26 08:01:08 crc kubenswrapper[4940]: I1126 08:01:08.222484 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-clwk5" podStartSLOduration=2.6047339259999998 podStartE2EDuration="5.222461987s" podCreationTimestamp="2025-11-26 08:01:03 +0000 UTC" firstStartedPulling="2025-11-26 08:01:05.157683929 +0000 UTC m=+3966.677825548" lastFinishedPulling="2025-11-26 08:01:07.77541199 +0000 UTC m=+3969.295553609" observedRunningTime="2025-11-26 08:01:08.215250539 +0000 UTC m=+3969.735392168" watchObservedRunningTime="2025-11-26 08:01:08.222461987 +0000 UTC m=+3969.742603616" Nov 26 08:01:09 crc kubenswrapper[4940]: I1126 08:01:09.208467 4940 generic.go:334] "Generic (PLEG): container finished" podID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerID="183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647" exitCode=0 Nov 26 08:01:09 crc kubenswrapper[4940]: I1126 08:01:09.208527 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhl5h" 
event={"ID":"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a","Type":"ContainerDied","Data":"183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647"} Nov 26 08:01:10 crc kubenswrapper[4940]: I1126 08:01:10.220552 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhl5h" event={"ID":"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a","Type":"ContainerStarted","Data":"7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27"} Nov 26 08:01:10 crc kubenswrapper[4940]: I1126 08:01:10.246786 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zhl5h" podStartSLOduration=2.767572162 podStartE2EDuration="4.246765365s" podCreationTimestamp="2025-11-26 08:01:06 +0000 UTC" firstStartedPulling="2025-11-26 08:01:08.198773208 +0000 UTC m=+3969.718914827" lastFinishedPulling="2025-11-26 08:01:09.677966411 +0000 UTC m=+3971.198108030" observedRunningTime="2025-11-26 08:01:10.241961053 +0000 UTC m=+3971.762102752" watchObservedRunningTime="2025-11-26 08:01:10.246765365 +0000 UTC m=+3971.766906994" Nov 26 08:01:12 crc kubenswrapper[4940]: I1126 08:01:12.166227 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:01:12 crc kubenswrapper[4940]: E1126 08:01:12.166517 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:01:14 crc kubenswrapper[4940]: I1126 08:01:14.002384 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:14 crc kubenswrapper[4940]: I1126 08:01:14.002472 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:14 crc kubenswrapper[4940]: I1126 08:01:14.049597 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:14 crc kubenswrapper[4940]: I1126 08:01:14.301348 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:14 crc kubenswrapper[4940]: I1126 08:01:14.343832 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwk5"] Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.281850 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-clwk5" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerName="registry-server" containerID="cri-o://bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8" gracePeriod=2 Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.690918 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.841354 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kh2bz\" (UniqueName: \"kubernetes.io/projected/432bf507-464f-4af5-91d0-dab73f5a2b1c-kube-api-access-kh2bz\") pod \"432bf507-464f-4af5-91d0-dab73f5a2b1c\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.841485 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-utilities\") pod \"432bf507-464f-4af5-91d0-dab73f5a2b1c\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.841533 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-catalog-content\") pod \"432bf507-464f-4af5-91d0-dab73f5a2b1c\" (UID: \"432bf507-464f-4af5-91d0-dab73f5a2b1c\") " Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.842626 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-utilities" (OuterVolumeSpecName: "utilities") pod "432bf507-464f-4af5-91d0-dab73f5a2b1c" (UID: "432bf507-464f-4af5-91d0-dab73f5a2b1c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.847165 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/432bf507-464f-4af5-91d0-dab73f5a2b1c-kube-api-access-kh2bz" (OuterVolumeSpecName: "kube-api-access-kh2bz") pod "432bf507-464f-4af5-91d0-dab73f5a2b1c" (UID: "432bf507-464f-4af5-91d0-dab73f5a2b1c"). InnerVolumeSpecName "kube-api-access-kh2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.862726 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "432bf507-464f-4af5-91d0-dab73f5a2b1c" (UID: "432bf507-464f-4af5-91d0-dab73f5a2b1c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.943105 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.943389 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/432bf507-464f-4af5-91d0-dab73f5a2b1c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.943448 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kh2bz\" (UniqueName: \"kubernetes.io/projected/432bf507-464f-4af5-91d0-dab73f5a2b1c-kube-api-access-kh2bz\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.987512 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:16 crc kubenswrapper[4940]: I1126 08:01:16.987805 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.028966 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.291474 4940 generic.go:334] "Generic (PLEG): container finished" podID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerID="bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8" exitCode=0 Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.292180 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-clwk5" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.292168 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwk5" event={"ID":"432bf507-464f-4af5-91d0-dab73f5a2b1c","Type":"ContainerDied","Data":"bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8"} Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.292376 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-clwk5" event={"ID":"432bf507-464f-4af5-91d0-dab73f5a2b1c","Type":"ContainerDied","Data":"9f089e01dbe9e521a166a48f8405953c2956aa39d4cba74df00fdeee28861651"} Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.292402 4940 scope.go:117] "RemoveContainer" containerID="bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.317428 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwk5"] Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.322551 4940 scope.go:117] "RemoveContainer" containerID="5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.323258 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-clwk5"] Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.366719 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.373848 4940 scope.go:117] "RemoveContainer" containerID="e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.399555 4940 scope.go:117] "RemoveContainer" containerID="bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8" Nov 26 08:01:17 crc kubenswrapper[4940]: E1126 08:01:17.400062 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8\": container with ID starting with bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8 not found: ID does not exist" containerID="bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.400168 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8"} err="failed to get container status \"bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8\": rpc error: code = NotFound desc = could not find container \"bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8\": container with ID starting with bbc6cf13f46ee8be526eb64dbbf4121f51b8a545f6ca92b2f33bc31238302fc8 not found: ID does not exist" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.400242 4940 scope.go:117] "RemoveContainer" containerID="5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e" Nov 26 08:01:17 crc kubenswrapper[4940]: E1126 08:01:17.400551 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e\": container with ID starting with 
5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e not found: ID does not exist" containerID="5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.400578 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e"} err="failed to get container status \"5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e\": rpc error: code = NotFound desc = could not find container \"5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e\": container with ID starting with 5bb7273ef920ff2b05349d5f57c1e3487e76879c347a1112624ee14a1632e44e not found: ID does not exist" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.400593 4940 scope.go:117] "RemoveContainer" containerID="e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e" Nov 26 08:01:17 crc kubenswrapper[4940]: E1126 08:01:17.400809 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e\": container with ID starting with e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e not found: ID does not exist" containerID="e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.400828 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e"} err="failed to get container status \"e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e\": rpc error: code = NotFound desc = could not find container \"e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e\": container with ID starting with e95331bb63127b3402edbe93cc6c2af255d654f7503dbaa2412f572620d8f22e not found: ID does not exist" Nov 26 08:01:17 crc kubenswrapper[4940]: I1126 08:01:17.884477 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zhl5h"] Nov 26 08:01:19 crc kubenswrapper[4940]: I1126 08:01:19.177079 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" path="/var/lib/kubelet/pods/432bf507-464f-4af5-91d0-dab73f5a2b1c/volumes" Nov 26 08:01:19 crc kubenswrapper[4940]: I1126 08:01:19.313423 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zhl5h" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerName="registry-server" containerID="cri-o://7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27" gracePeriod=2 Nov 26 08:01:19 crc kubenswrapper[4940]: I1126 08:01:19.843341 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:19 crc kubenswrapper[4940]: I1126 08:01:19.992305 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gk6pf\" (UniqueName: \"kubernetes.io/projected/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-kube-api-access-gk6pf\") pod \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " Nov 26 08:01:19 crc kubenswrapper[4940]: I1126 08:01:19.992414 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-catalog-content\") pod \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " Nov 26 08:01:19 crc kubenswrapper[4940]: I1126 08:01:19.992470 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-utilities\") pod \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\" (UID: \"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a\") " Nov 26 08:01:19 crc kubenswrapper[4940]: I1126 08:01:19.994383 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-utilities" (OuterVolumeSpecName: "utilities") pod "0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" (UID: "0b81417b-4b60-465f-b6a8-b7abe5cf3c9a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.001376 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-kube-api-access-gk6pf" (OuterVolumeSpecName: "kube-api-access-gk6pf") pod "0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" (UID: "0b81417b-4b60-465f-b6a8-b7abe5cf3c9a"). InnerVolumeSpecName "kube-api-access-gk6pf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.093738 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gk6pf\" (UniqueName: \"kubernetes.io/projected/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-kube-api-access-gk6pf\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.093772 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.325314 4940 generic.go:334] "Generic (PLEG): container finished" podID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerID="7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27" exitCode=0 Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.325366 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhl5h" event={"ID":"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a","Type":"ContainerDied","Data":"7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27"} Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.325395 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zhl5h" event={"ID":"0b81417b-4b60-465f-b6a8-b7abe5cf3c9a","Type":"ContainerDied","Data":"9298b7e87a37d9c435356ed0ca21cd1cd6cc00652ba5b651eaf64ade939c5efd"} Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.325413 4940 scope.go:117] "RemoveContainer" containerID="7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.325577 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zhl5h" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.346018 4940 scope.go:117] "RemoveContainer" containerID="183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.372983 4940 scope.go:117] "RemoveContainer" containerID="ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.397180 4940 scope.go:117] "RemoveContainer" containerID="7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27" Nov 26 08:01:20 crc kubenswrapper[4940]: E1126 08:01:20.397832 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27\": container with ID starting with 7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27 not found: ID does not exist" containerID="7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.397881 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27"} err="failed to get container status \"7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27\": rpc error: code = NotFound desc = could not find container \"7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27\": container with ID starting with 7e99077005c40b93f9df8d6c27e50c36c06aab9f081aec382a8e7f395151af27 not found: ID does not exist" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.397912 4940 scope.go:117] "RemoveContainer" containerID="183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647" Nov 26 08:01:20 crc kubenswrapper[4940]: E1126 08:01:20.398220 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647\": container with ID starting with 183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647 not found: ID does not exist" containerID="183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.398249 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647"} err="failed to get container status \"183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647\": rpc error: code = NotFound desc = could not find container \"183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647\": container with ID starting with 183ddc754482bc135ea7234f3bdc1f2bdfadf074c355e409e68a0a4716e01647 not found: ID does not exist" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.398263 4940 scope.go:117] "RemoveContainer" containerID="ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995" Nov 26 08:01:20 crc kubenswrapper[4940]: E1126 08:01:20.398513 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995\": container with ID starting with ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995 not found: ID does not exist" containerID="ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995" 
Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.398541 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995"} err="failed to get container status \"ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995\": rpc error: code = NotFound desc = could not find container \"ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995\": container with ID starting with ee4b30388fdd74d73178e8dbe1936cd7dcac485e92a2899a5fbc15139d888995 not found: ID does not exist" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.527410 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" (UID: "0b81417b-4b60-465f-b6a8-b7abe5cf3c9a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.601345 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.674948 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zhl5h"] Nov 26 08:01:20 crc kubenswrapper[4940]: I1126 08:01:20.681510 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zhl5h"] Nov 26 08:01:21 crc kubenswrapper[4940]: I1126 08:01:21.180657 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" path="/var/lib/kubelet/pods/0b81417b-4b60-465f-b6a8-b7abe5cf3c9a/volumes" Nov 26 08:01:27 crc kubenswrapper[4940]: I1126 08:01:27.166280 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:01:27 crc kubenswrapper[4940]: E1126 08:01:27.167219 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:01:41 crc kubenswrapper[4940]: I1126 08:01:41.165236 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:01:41 crc kubenswrapper[4940]: E1126 08:01:41.166158 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:01:55 crc kubenswrapper[4940]: I1126 08:01:55.165461 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:01:55 crc kubenswrapper[4940]: E1126 08:01:55.166159 4940 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:02:10 crc kubenswrapper[4940]: I1126 08:02:10.166470 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:02:10 crc kubenswrapper[4940]: E1126 08:02:10.169183 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:02:23 crc kubenswrapper[4940]: I1126 08:02:23.165436 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:02:23 crc kubenswrapper[4940]: E1126 08:02:23.167490 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.319232 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l4jkd"] Nov 26 08:02:30 crc kubenswrapper[4940]: E1126 08:02:30.320271 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerName="extract-utilities" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.320294 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerName="extract-utilities" Nov 26 08:02:30 crc kubenswrapper[4940]: E1126 08:02:30.320330 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerName="extract-content" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.320342 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerName="extract-content" Nov 26 08:02:30 crc kubenswrapper[4940]: E1126 08:02:30.320373 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerName="registry-server" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.320384 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerName="registry-server" Nov 26 08:02:30 crc kubenswrapper[4940]: E1126 08:02:30.320401 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerName="extract-content" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.320410 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerName="extract-content" Nov 26 08:02:30 crc kubenswrapper[4940]: E1126 08:02:30.320429 4940 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerName="registry-server" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.320441 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerName="registry-server" Nov 26 08:02:30 crc kubenswrapper[4940]: E1126 08:02:30.320466 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerName="extract-utilities" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.320479 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerName="extract-utilities" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.320770 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="432bf507-464f-4af5-91d0-dab73f5a2b1c" containerName="registry-server" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.320800 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b81417b-4b60-465f-b6a8-b7abe5cf3c9a" containerName="registry-server" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.322397 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.334183 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l4jkd"] Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.414287 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-utilities\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.414718 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dq76d\" (UniqueName: \"kubernetes.io/projected/6f30de05-a001-479d-ab3b-32b16742826d-kube-api-access-dq76d\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.414785 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-catalog-content\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.515825 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dq76d\" (UniqueName: \"kubernetes.io/projected/6f30de05-a001-479d-ab3b-32b16742826d-kube-api-access-dq76d\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.515880 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-catalog-content\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " 
pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.515953 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-utilities\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.516396 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-catalog-content\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.516404 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-utilities\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.533942 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dq76d\" (UniqueName: \"kubernetes.io/projected/6f30de05-a001-479d-ab3b-32b16742826d-kube-api-access-dq76d\") pod \"community-operators-l4jkd\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.652842 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.930517 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l4jkd"] Nov 26 08:02:30 crc kubenswrapper[4940]: I1126 08:02:30.967710 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4jkd" event={"ID":"6f30de05-a001-479d-ab3b-32b16742826d","Type":"ContainerStarted","Data":"cc26c02581d4c5f2714536b8bf64cfee739c1bf29d3719ce31d914db0b74a1b6"} Nov 26 08:02:31 crc kubenswrapper[4940]: I1126 08:02:31.980729 4940 generic.go:334] "Generic (PLEG): container finished" podID="6f30de05-a001-479d-ab3b-32b16742826d" containerID="574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948" exitCode=0 Nov 26 08:02:31 crc kubenswrapper[4940]: I1126 08:02:31.980802 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4jkd" event={"ID":"6f30de05-a001-479d-ab3b-32b16742826d","Type":"ContainerDied","Data":"574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948"} Nov 26 08:02:32 crc kubenswrapper[4940]: I1126 08:02:32.996871 4940 generic.go:334] "Generic (PLEG): container finished" podID="6f30de05-a001-479d-ab3b-32b16742826d" containerID="187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8" exitCode=0 Nov 26 08:02:32 crc kubenswrapper[4940]: I1126 08:02:32.996930 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4jkd" event={"ID":"6f30de05-a001-479d-ab3b-32b16742826d","Type":"ContainerDied","Data":"187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8"} Nov 26 08:02:34 crc kubenswrapper[4940]: I1126 08:02:34.007158 4940 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/community-operators-l4jkd" event={"ID":"6f30de05-a001-479d-ab3b-32b16742826d","Type":"ContainerStarted","Data":"a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a"} Nov 26 08:02:34 crc kubenswrapper[4940]: I1126 08:02:34.031983 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l4jkd" podStartSLOduration=2.583662794 podStartE2EDuration="4.03196482s" podCreationTimestamp="2025-11-26 08:02:30 +0000 UTC" firstStartedPulling="2025-11-26 08:02:31.983944233 +0000 UTC m=+4053.504085872" lastFinishedPulling="2025-11-26 08:02:33.432246279 +0000 UTC m=+4054.952387898" observedRunningTime="2025-11-26 08:02:34.029582405 +0000 UTC m=+4055.549724034" watchObservedRunningTime="2025-11-26 08:02:34.03196482 +0000 UTC m=+4055.552106449" Nov 26 08:02:36 crc kubenswrapper[4940]: I1126 08:02:36.164825 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:02:36 crc kubenswrapper[4940]: E1126 08:02:36.165097 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:02:40 crc kubenswrapper[4940]: I1126 08:02:40.653648 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:40 crc kubenswrapper[4940]: I1126 08:02:40.653980 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:40 crc kubenswrapper[4940]: I1126 08:02:40.698984 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:41 crc kubenswrapper[4940]: I1126 08:02:41.098223 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:41 crc kubenswrapper[4940]: I1126 08:02:41.143109 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l4jkd"] Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.079886 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l4jkd" podUID="6f30de05-a001-479d-ab3b-32b16742826d" containerName="registry-server" containerID="cri-o://a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a" gracePeriod=2 Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.502998 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.627067 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dq76d\" (UniqueName: \"kubernetes.io/projected/6f30de05-a001-479d-ab3b-32b16742826d-kube-api-access-dq76d\") pod \"6f30de05-a001-479d-ab3b-32b16742826d\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.627424 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-catalog-content\") pod \"6f30de05-a001-479d-ab3b-32b16742826d\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.627487 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-utilities\") pod \"6f30de05-a001-479d-ab3b-32b16742826d\" (UID: \"6f30de05-a001-479d-ab3b-32b16742826d\") " Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.628788 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-utilities" (OuterVolumeSpecName: "utilities") pod "6f30de05-a001-479d-ab3b-32b16742826d" (UID: "6f30de05-a001-479d-ab3b-32b16742826d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.632381 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f30de05-a001-479d-ab3b-32b16742826d-kube-api-access-dq76d" (OuterVolumeSpecName: "kube-api-access-dq76d") pod "6f30de05-a001-479d-ab3b-32b16742826d" (UID: "6f30de05-a001-479d-ab3b-32b16742826d"). InnerVolumeSpecName "kube-api-access-dq76d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.729007 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.729182 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dq76d\" (UniqueName: \"kubernetes.io/projected/6f30de05-a001-479d-ab3b-32b16742826d-kube-api-access-dq76d\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:43 crc kubenswrapper[4940]: I1126 08:02:43.943084 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f30de05-a001-479d-ab3b-32b16742826d" (UID: "6f30de05-a001-479d-ab3b-32b16742826d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.033359 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f30de05-a001-479d-ab3b-32b16742826d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.103536 4940 generic.go:334] "Generic (PLEG): container finished" podID="6f30de05-a001-479d-ab3b-32b16742826d" containerID="a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a" exitCode=0 Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.103607 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4jkd" event={"ID":"6f30de05-a001-479d-ab3b-32b16742826d","Type":"ContainerDied","Data":"a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a"} Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.103654 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4jkd" event={"ID":"6f30de05-a001-479d-ab3b-32b16742826d","Type":"ContainerDied","Data":"cc26c02581d4c5f2714536b8bf64cfee739c1bf29d3719ce31d914db0b74a1b6"} Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.103662 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4jkd" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.103687 4940 scope.go:117] "RemoveContainer" containerID="a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.145300 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l4jkd"] Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.145769 4940 scope.go:117] "RemoveContainer" containerID="187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.155738 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l4jkd"] Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.167998 4940 scope.go:117] "RemoveContainer" containerID="574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.197092 4940 scope.go:117] "RemoveContainer" containerID="a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a" Nov 26 08:02:44 crc kubenswrapper[4940]: E1126 08:02:44.197575 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a\": container with ID starting with a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a not found: ID does not exist" containerID="a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.197612 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a"} err="failed to get container status \"a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a\": rpc error: code = NotFound desc = could not find container \"a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a\": container with ID starting with a7081bff51d3103886b3407668fb231ce77bab651363dff39e098be14579844a not found: ID does not exist" Nov 26 
08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.197638 4940 scope.go:117] "RemoveContainer" containerID="187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8" Nov 26 08:02:44 crc kubenswrapper[4940]: E1126 08:02:44.197877 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8\": container with ID starting with 187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8 not found: ID does not exist" containerID="187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.197902 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8"} err="failed to get container status \"187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8\": rpc error: code = NotFound desc = could not find container \"187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8\": container with ID starting with 187362e7e48a3a4b6a750e6e7297b8497c425d4df9af9deba5403f2dc8b118f8 not found: ID does not exist" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.197916 4940 scope.go:117] "RemoveContainer" containerID="574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948" Nov 26 08:02:44 crc kubenswrapper[4940]: E1126 08:02:44.198125 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948\": container with ID starting with 574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948 not found: ID does not exist" containerID="574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948" Nov 26 08:02:44 crc kubenswrapper[4940]: I1126 08:02:44.198151 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948"} err="failed to get container status \"574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948\": rpc error: code = NotFound desc = could not find container \"574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948\": container with ID starting with 574b053609a1b0319d2f5077f0ea65026a5cb132d6e2492628ec8a4e61d5b948 not found: ID does not exist" Nov 26 08:02:45 crc kubenswrapper[4940]: I1126 08:02:45.176822 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f30de05-a001-479d-ab3b-32b16742826d" path="/var/lib/kubelet/pods/6f30de05-a001-479d-ab3b-32b16742826d/volumes" Nov 26 08:02:49 crc kubenswrapper[4940]: I1126 08:02:49.184990 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:02:49 crc kubenswrapper[4940]: E1126 08:02:49.186219 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:03:00 crc kubenswrapper[4940]: I1126 08:03:00.165320 4940 scope.go:117] "RemoveContainer" 
containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:03:01 crc kubenswrapper[4940]: I1126 08:03:01.243131 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"b6b5493e7644e04ef0f7c84c42435f9c90ff7f6770b8566503c5bd509d73341d"} Nov 26 08:05:21 crc kubenswrapper[4940]: I1126 08:05:21.728438 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:05:21 crc kubenswrapper[4940]: I1126 08:05:21.729108 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:05:51 crc kubenswrapper[4940]: I1126 08:05:51.728415 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:05:51 crc kubenswrapper[4940]: I1126 08:05:51.728993 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:06:21 crc kubenswrapper[4940]: I1126 08:06:21.729008 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:06:21 crc kubenswrapper[4940]: I1126 08:06:21.729764 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:06:21 crc kubenswrapper[4940]: I1126 08:06:21.729835 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 08:06:21 crc kubenswrapper[4940]: I1126 08:06:21.730784 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b6b5493e7644e04ef0f7c84c42435f9c90ff7f6770b8566503c5bd509d73341d"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:06:21 crc kubenswrapper[4940]: I1126 08:06:21.730889 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" 
containerName="machine-config-daemon" containerID="cri-o://b6b5493e7644e04ef0f7c84c42435f9c90ff7f6770b8566503c5bd509d73341d" gracePeriod=600 Nov 26 08:06:22 crc kubenswrapper[4940]: I1126 08:06:22.664658 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="b6b5493e7644e04ef0f7c84c42435f9c90ff7f6770b8566503c5bd509d73341d" exitCode=0 Nov 26 08:06:22 crc kubenswrapper[4940]: I1126 08:06:22.664722 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"b6b5493e7644e04ef0f7c84c42435f9c90ff7f6770b8566503c5bd509d73341d"} Nov 26 08:06:22 crc kubenswrapper[4940]: I1126 08:06:22.665007 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"} Nov 26 08:06:22 crc kubenswrapper[4940]: I1126 08:06:22.665058 4940 scope.go:117] "RemoveContainer" containerID="ee12ccd88627b3ae20a9b06f5402204148bfd7578033456c321235b47f4bbb95" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.540404 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tjq8n"] Nov 26 08:07:32 crc kubenswrapper[4940]: E1126 08:07:32.541306 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f30de05-a001-479d-ab3b-32b16742826d" containerName="extract-utilities" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.541322 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f30de05-a001-479d-ab3b-32b16742826d" containerName="extract-utilities" Nov 26 08:07:32 crc kubenswrapper[4940]: E1126 08:07:32.541342 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f30de05-a001-479d-ab3b-32b16742826d" containerName="extract-content" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.541349 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f30de05-a001-479d-ab3b-32b16742826d" containerName="extract-content" Nov 26 08:07:32 crc kubenswrapper[4940]: E1126 08:07:32.541365 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f30de05-a001-479d-ab3b-32b16742826d" containerName="registry-server" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.541372 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f30de05-a001-479d-ab3b-32b16742826d" containerName="registry-server" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.541590 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f30de05-a001-479d-ab3b-32b16742826d" containerName="registry-server" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.542859 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.555256 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tjq8n"] Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.715441 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-catalog-content\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.715569 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-utilities\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.715623 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f77wr\" (UniqueName: \"kubernetes.io/projected/e285fbe7-945f-4f00-be8d-b6473763d363-kube-api-access-f77wr\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.816427 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-catalog-content\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.816515 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-utilities\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.816559 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f77wr\" (UniqueName: \"kubernetes.io/projected/e285fbe7-945f-4f00-be8d-b6473763d363-kube-api-access-f77wr\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.817233 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-catalog-content\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.817433 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-utilities\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.840344 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-f77wr\" (UniqueName: \"kubernetes.io/projected/e285fbe7-945f-4f00-be8d-b6473763d363-kube-api-access-f77wr\") pod \"redhat-operators-tjq8n\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:32 crc kubenswrapper[4940]: I1126 08:07:32.876576 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:33 crc kubenswrapper[4940]: I1126 08:07:33.306925 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tjq8n"] Nov 26 08:07:34 crc kubenswrapper[4940]: I1126 08:07:34.321887 4940 generic.go:334] "Generic (PLEG): container finished" podID="e285fbe7-945f-4f00-be8d-b6473763d363" containerID="e4acff75b567afb4ac18ce72477f23b59ac9e94babbb1c8f95a0021a95d50071" exitCode=0 Nov 26 08:07:34 crc kubenswrapper[4940]: I1126 08:07:34.321982 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjq8n" event={"ID":"e285fbe7-945f-4f00-be8d-b6473763d363","Type":"ContainerDied","Data":"e4acff75b567afb4ac18ce72477f23b59ac9e94babbb1c8f95a0021a95d50071"} Nov 26 08:07:34 crc kubenswrapper[4940]: I1126 08:07:34.322392 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjq8n" event={"ID":"e285fbe7-945f-4f00-be8d-b6473763d363","Type":"ContainerStarted","Data":"672f35a3f506af005c1cc55ee7b2b511dceb22211a9431b089ea0484cf360c50"} Nov 26 08:07:34 crc kubenswrapper[4940]: I1126 08:07:34.324589 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:07:35 crc kubenswrapper[4940]: I1126 08:07:35.330396 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjq8n" event={"ID":"e285fbe7-945f-4f00-be8d-b6473763d363","Type":"ContainerStarted","Data":"d73574bb92f9c2d93ffbba935b80a4d72f8e4bc04e4bd39da709a1814caaf9ce"} Nov 26 08:07:36 crc kubenswrapper[4940]: I1126 08:07:36.345152 4940 generic.go:334] "Generic (PLEG): container finished" podID="e285fbe7-945f-4f00-be8d-b6473763d363" containerID="d73574bb92f9c2d93ffbba935b80a4d72f8e4bc04e4bd39da709a1814caaf9ce" exitCode=0 Nov 26 08:07:36 crc kubenswrapper[4940]: I1126 08:07:36.345244 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjq8n" event={"ID":"e285fbe7-945f-4f00-be8d-b6473763d363","Type":"ContainerDied","Data":"d73574bb92f9c2d93ffbba935b80a4d72f8e4bc04e4bd39da709a1814caaf9ce"} Nov 26 08:07:37 crc kubenswrapper[4940]: I1126 08:07:37.360270 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjq8n" event={"ID":"e285fbe7-945f-4f00-be8d-b6473763d363","Type":"ContainerStarted","Data":"6b0c9fc486ba64520e04dd23847bf7c6aeecb23b2f5a82dbb0b7d73ae496836d"} Nov 26 08:07:37 crc kubenswrapper[4940]: I1126 08:07:37.383720 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tjq8n" podStartSLOduration=2.9446525169999997 podStartE2EDuration="5.383699574s" podCreationTimestamp="2025-11-26 08:07:32 +0000 UTC" firstStartedPulling="2025-11-26 08:07:34.324170185 +0000 UTC m=+4355.844311834" lastFinishedPulling="2025-11-26 08:07:36.763217282 +0000 UTC m=+4358.283358891" observedRunningTime="2025-11-26 08:07:37.381449312 +0000 UTC m=+4358.901590971" watchObservedRunningTime="2025-11-26 08:07:37.383699574 +0000 UTC m=+4358.903841203" Nov 26 08:07:42 crc 
kubenswrapper[4940]: I1126 08:07:42.876753 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:42 crc kubenswrapper[4940]: I1126 08:07:42.877429 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:42 crc kubenswrapper[4940]: I1126 08:07:42.931090 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:43 crc kubenswrapper[4940]: I1126 08:07:43.472603 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:43 crc kubenswrapper[4940]: I1126 08:07:43.538108 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tjq8n"] Nov 26 08:07:45 crc kubenswrapper[4940]: I1126 08:07:45.429887 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tjq8n" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" containerName="registry-server" containerID="cri-o://6b0c9fc486ba64520e04dd23847bf7c6aeecb23b2f5a82dbb0b7d73ae496836d" gracePeriod=2 Nov 26 08:07:47 crc kubenswrapper[4940]: I1126 08:07:47.456128 4940 generic.go:334] "Generic (PLEG): container finished" podID="e285fbe7-945f-4f00-be8d-b6473763d363" containerID="6b0c9fc486ba64520e04dd23847bf7c6aeecb23b2f5a82dbb0b7d73ae496836d" exitCode=0 Nov 26 08:07:47 crc kubenswrapper[4940]: I1126 08:07:47.456340 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjq8n" event={"ID":"e285fbe7-945f-4f00-be8d-b6473763d363","Type":"ContainerDied","Data":"6b0c9fc486ba64520e04dd23847bf7c6aeecb23b2f5a82dbb0b7d73ae496836d"} Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.111705 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.244873 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-catalog-content\") pod \"e285fbe7-945f-4f00-be8d-b6473763d363\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.244922 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-utilities\") pod \"e285fbe7-945f-4f00-be8d-b6473763d363\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.245322 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f77wr\" (UniqueName: \"kubernetes.io/projected/e285fbe7-945f-4f00-be8d-b6473763d363-kube-api-access-f77wr\") pod \"e285fbe7-945f-4f00-be8d-b6473763d363\" (UID: \"e285fbe7-945f-4f00-be8d-b6473763d363\") " Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.245670 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-utilities" (OuterVolumeSpecName: "utilities") pod "e285fbe7-945f-4f00-be8d-b6473763d363" (UID: "e285fbe7-945f-4f00-be8d-b6473763d363"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.245873 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.250740 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e285fbe7-945f-4f00-be8d-b6473763d363-kube-api-access-f77wr" (OuterVolumeSpecName: "kube-api-access-f77wr") pod "e285fbe7-945f-4f00-be8d-b6473763d363" (UID: "e285fbe7-945f-4f00-be8d-b6473763d363"). InnerVolumeSpecName "kube-api-access-f77wr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.340198 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e285fbe7-945f-4f00-be8d-b6473763d363" (UID: "e285fbe7-945f-4f00-be8d-b6473763d363"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.346783 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f77wr\" (UniqueName: \"kubernetes.io/projected/e285fbe7-945f-4f00-be8d-b6473763d363-kube-api-access-f77wr\") on node \"crc\" DevicePath \"\"" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.346831 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e285fbe7-945f-4f00-be8d-b6473763d363-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.471960 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tjq8n" event={"ID":"e285fbe7-945f-4f00-be8d-b6473763d363","Type":"ContainerDied","Data":"672f35a3f506af005c1cc55ee7b2b511dceb22211a9431b089ea0484cf360c50"} Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.472026 4940 scope.go:117] "RemoveContainer" containerID="6b0c9fc486ba64520e04dd23847bf7c6aeecb23b2f5a82dbb0b7d73ae496836d" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.472114 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tjq8n" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.512393 4940 scope.go:117] "RemoveContainer" containerID="d73574bb92f9c2d93ffbba935b80a4d72f8e4bc04e4bd39da709a1814caaf9ce" Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.518119 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tjq8n"] Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.527554 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tjq8n"] Nov 26 08:07:48 crc kubenswrapper[4940]: I1126 08:07:48.542234 4940 scope.go:117] "RemoveContainer" containerID="e4acff75b567afb4ac18ce72477f23b59ac9e94babbb1c8f95a0021a95d50071" Nov 26 08:07:49 crc kubenswrapper[4940]: I1126 08:07:49.175755 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" path="/var/lib/kubelet/pods/e285fbe7-945f-4f00-be8d-b6473763d363/volumes" Nov 26 08:08:51 crc kubenswrapper[4940]: I1126 08:08:51.729204 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:08:51 crc kubenswrapper[4940]: I1126 08:08:51.732237 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:09:21 crc kubenswrapper[4940]: I1126 08:09:21.728285 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:09:21 crc kubenswrapper[4940]: I1126 08:09:21.728977 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:09:51 crc kubenswrapper[4940]: I1126 08:09:51.727924 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:09:51 crc kubenswrapper[4940]: I1126 08:09:51.728675 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:09:51 crc kubenswrapper[4940]: I1126 08:09:51.728737 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 08:09:51 crc kubenswrapper[4940]: I1126 08:09:51.729512 4940 
Nov 26 08:09:51 crc kubenswrapper[4940]: I1126 08:09:51.729512 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 08:09:51 crc kubenswrapper[4940]: I1126 08:09:51.729591 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" gracePeriod=600
Nov 26 08:09:51 crc kubenswrapper[4940]: E1126 08:09:51.855318 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:09:52 crc kubenswrapper[4940]: I1126 08:09:52.539961 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" exitCode=0
Nov 26 08:09:52 crc kubenswrapper[4940]: I1126 08:09:52.540008 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"}
Nov 26 08:09:52 crc kubenswrapper[4940]: I1126 08:09:52.540815 4940 scope.go:117] "RemoveContainer" containerID="b6b5493e7644e04ef0f7c84c42435f9c90ff7f6770b8566503c5bd509d73341d"
Nov 26 08:09:52 crc kubenswrapper[4940]: I1126 08:09:52.541848 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"
Nov 26 08:09:52 crc kubenswrapper[4940]: E1126 08:09:52.542303 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:10:05 crc kubenswrapper[4940]: I1126 08:10:05.166022 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"
Nov 26 08:10:05 crc kubenswrapper[4940]: E1126 08:10:05.167475 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:10:17 crc kubenswrapper[4940]: I1126 08:10:17.165276 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"
Nov 26 08:10:17 crc kubenswrapper[4940]: E1126 08:10:17.166264 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:10:28 crc kubenswrapper[4940]: I1126 08:10:28.165620 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"
Nov 26 08:10:28 crc kubenswrapper[4940]: E1126 08:10:28.166554 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:10:41 crc kubenswrapper[4940]: I1126 08:10:41.165889 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"
Nov 26 08:10:41 crc kubenswrapper[4940]: E1126 08:10:41.166617 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:10:52 crc kubenswrapper[4940]: I1126 08:10:52.165895 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"
Nov 26 08:10:52 crc kubenswrapper[4940]: E1126 08:10:52.166697 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:11:03 crc kubenswrapper[4940]: I1126 08:11:03.165908 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"
Nov 26 08:11:03 crc kubenswrapper[4940]: E1126 08:11:03.167798 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.609717 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gtcg2"]
Nov 26 08:11:07 crc kubenswrapper[4940]: E1126 08:11:07.610607 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" containerName="registry-server"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.610625 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" containerName="registry-server"
Nov 26 08:11:07 crc kubenswrapper[4940]: E1126 08:11:07.610645 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" containerName="extract-utilities"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.610654 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" containerName="extract-utilities"
Nov 26 08:11:07 crc kubenswrapper[4940]: E1126 08:11:07.610669 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" containerName="extract-content"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.610677 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" containerName="extract-content"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.610847 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e285fbe7-945f-4f00-be8d-b6473763d363" containerName="registry-server"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.612199 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gtcg2"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.626468 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gtcg2"]
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.671232 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj2nz\" (UniqueName: \"kubernetes.io/projected/2e2073c6-124a-4449-88c9-eea21832a7d9-kube-api-access-zj2nz\") pod \"certified-operators-gtcg2\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " pod="openshift-marketplace/certified-operators-gtcg2"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.671398 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-utilities\") pod \"certified-operators-gtcg2\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " pod="openshift-marketplace/certified-operators-gtcg2"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.675248 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-catalog-content\") pod \"certified-operators-gtcg2\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " pod="openshift-marketplace/certified-operators-gtcg2"
Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.776450 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-utilities\") pod \"certified-operators-gtcg2\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " pod="openshift-marketplace/certified-operators-gtcg2"
pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.776643 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zj2nz\" (UniqueName: \"kubernetes.io/projected/2e2073c6-124a-4449-88c9-eea21832a7d9-kube-api-access-zj2nz\") pod \"certified-operators-gtcg2\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.777467 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-catalog-content\") pod \"certified-operators-gtcg2\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.777614 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-utilities\") pod \"certified-operators-gtcg2\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.806920 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zj2nz\" (UniqueName: \"kubernetes.io/projected/2e2073c6-124a-4449-88c9-eea21832a7d9-kube-api-access-zj2nz\") pod \"certified-operators-gtcg2\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:07 crc kubenswrapper[4940]: I1126 08:11:07.938665 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:08 crc kubenswrapper[4940]: I1126 08:11:08.408998 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gtcg2"] Nov 26 08:11:09 crc kubenswrapper[4940]: I1126 08:11:09.258513 4940 generic.go:334] "Generic (PLEG): container finished" podID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerID="bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905" exitCode=0 Nov 26 08:11:09 crc kubenswrapper[4940]: I1126 08:11:09.258592 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gtcg2" event={"ID":"2e2073c6-124a-4449-88c9-eea21832a7d9","Type":"ContainerDied","Data":"bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905"} Nov 26 08:11:09 crc kubenswrapper[4940]: I1126 08:11:09.258815 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gtcg2" event={"ID":"2e2073c6-124a-4449-88c9-eea21832a7d9","Type":"ContainerStarted","Data":"966bccdf6f1856042bf51a5149071733d1b9bd4673a35e94f0027eb672b81e75"} Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.271550 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gtcg2" event={"ID":"2e2073c6-124a-4449-88c9-eea21832a7d9","Type":"ContainerStarted","Data":"cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d"} Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.420340 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qql2w"] Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.422702 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.434694 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qql2w"] Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.519404 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-utilities\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.519444 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w46fg\" (UniqueName: \"kubernetes.io/projected/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-kube-api-access-w46fg\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.519541 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-catalog-content\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.620334 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-utilities\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.620381 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w46fg\" (UniqueName: \"kubernetes.io/projected/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-kube-api-access-w46fg\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.620441 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-catalog-content\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.620993 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-catalog-content\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.621117 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-utilities\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.641892 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-w46fg\" (UniqueName: \"kubernetes.io/projected/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-kube-api-access-w46fg\") pod \"redhat-marketplace-qql2w\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:10 crc kubenswrapper[4940]: I1126 08:11:10.746817 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:11 crc kubenswrapper[4940]: I1126 08:11:11.152808 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qql2w"] Nov 26 08:11:11 crc kubenswrapper[4940]: W1126 08:11:11.158882 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod151e2fdb_89cc_4686_88c8_1fefe3e42c0b.slice/crio-47d2316e84e4a7d4841f7d27cc257fb59d858bb6f64bb5263b317360c42c93db WatchSource:0}: Error finding container 47d2316e84e4a7d4841f7d27cc257fb59d858bb6f64bb5263b317360c42c93db: Status 404 returned error can't find the container with id 47d2316e84e4a7d4841f7d27cc257fb59d858bb6f64bb5263b317360c42c93db Nov 26 08:11:11 crc kubenswrapper[4940]: I1126 08:11:11.279957 4940 generic.go:334] "Generic (PLEG): container finished" podID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerID="cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d" exitCode=0 Nov 26 08:11:11 crc kubenswrapper[4940]: I1126 08:11:11.280005 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gtcg2" event={"ID":"2e2073c6-124a-4449-88c9-eea21832a7d9","Type":"ContainerDied","Data":"cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d"} Nov 26 08:11:11 crc kubenswrapper[4940]: I1126 08:11:11.282116 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qql2w" event={"ID":"151e2fdb-89cc-4686-88c8-1fefe3e42c0b","Type":"ContainerStarted","Data":"47d2316e84e4a7d4841f7d27cc257fb59d858bb6f64bb5263b317360c42c93db"} Nov 26 08:11:12 crc kubenswrapper[4940]: I1126 08:11:12.291362 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gtcg2" event={"ID":"2e2073c6-124a-4449-88c9-eea21832a7d9","Type":"ContainerStarted","Data":"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2"} Nov 26 08:11:12 crc kubenswrapper[4940]: I1126 08:11:12.293109 4940 generic.go:334] "Generic (PLEG): container finished" podID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerID="defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15" exitCode=0 Nov 26 08:11:12 crc kubenswrapper[4940]: I1126 08:11:12.293144 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qql2w" event={"ID":"151e2fdb-89cc-4686-88c8-1fefe3e42c0b","Type":"ContainerDied","Data":"defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15"} Nov 26 08:11:12 crc kubenswrapper[4940]: I1126 08:11:12.316258 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gtcg2" podStartSLOduration=2.909464481 podStartE2EDuration="5.316236004s" podCreationTimestamp="2025-11-26 08:11:07 +0000 UTC" firstStartedPulling="2025-11-26 08:11:09.260538808 +0000 UTC m=+4570.780680427" lastFinishedPulling="2025-11-26 08:11:11.667310331 +0000 UTC m=+4573.187451950" observedRunningTime="2025-11-26 08:11:12.309226752 +0000 UTC m=+4573.829368451" 
watchObservedRunningTime="2025-11-26 08:11:12.316236004 +0000 UTC m=+4573.836377623" Nov 26 08:11:13 crc kubenswrapper[4940]: I1126 08:11:13.306193 4940 generic.go:334] "Generic (PLEG): container finished" podID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerID="84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab" exitCode=0 Nov 26 08:11:13 crc kubenswrapper[4940]: I1126 08:11:13.306238 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qql2w" event={"ID":"151e2fdb-89cc-4686-88c8-1fefe3e42c0b","Type":"ContainerDied","Data":"84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab"} Nov 26 08:11:14 crc kubenswrapper[4940]: I1126 08:11:14.320969 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qql2w" event={"ID":"151e2fdb-89cc-4686-88c8-1fefe3e42c0b","Type":"ContainerStarted","Data":"403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9"} Nov 26 08:11:14 crc kubenswrapper[4940]: I1126 08:11:14.347812 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qql2w" podStartSLOduration=2.946226078 podStartE2EDuration="4.347791416s" podCreationTimestamp="2025-11-26 08:11:10 +0000 UTC" firstStartedPulling="2025-11-26 08:11:12.294238456 +0000 UTC m=+4573.814380075" lastFinishedPulling="2025-11-26 08:11:13.695803794 +0000 UTC m=+4575.215945413" observedRunningTime="2025-11-26 08:11:14.34069839 +0000 UTC m=+4575.860840009" watchObservedRunningTime="2025-11-26 08:11:14.347791416 +0000 UTC m=+4575.867933035" Nov 26 08:11:15 crc kubenswrapper[4940]: I1126 08:11:15.166095 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:11:15 crc kubenswrapper[4940]: E1126 08:11:15.166719 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:11:17 crc kubenswrapper[4940]: I1126 08:11:17.939278 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:17 crc kubenswrapper[4940]: I1126 08:11:17.939624 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:17 crc kubenswrapper[4940]: I1126 08:11:17.982183 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:18 crc kubenswrapper[4940]: I1126 08:11:18.421587 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:18 crc kubenswrapper[4940]: I1126 08:11:18.474958 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gtcg2"] Nov 26 08:11:20 crc kubenswrapper[4940]: I1126 08:11:20.371616 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gtcg2" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerName="registry-server" 
containerID="cri-o://5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2" gracePeriod=2 Nov 26 08:11:20 crc kubenswrapper[4940]: I1126 08:11:20.747571 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:20 crc kubenswrapper[4940]: I1126 08:11:20.747781 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:20 crc kubenswrapper[4940]: I1126 08:11:20.797083 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.255383 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.381630 4940 generic.go:334] "Generic (PLEG): container finished" podID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerID="5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2" exitCode=0 Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.381708 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gtcg2" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.381722 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gtcg2" event={"ID":"2e2073c6-124a-4449-88c9-eea21832a7d9","Type":"ContainerDied","Data":"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2"} Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.381801 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gtcg2" event={"ID":"2e2073c6-124a-4449-88c9-eea21832a7d9","Type":"ContainerDied","Data":"966bccdf6f1856042bf51a5149071733d1b9bd4673a35e94f0027eb672b81e75"} Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.381823 4940 scope.go:117] "RemoveContainer" containerID="5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.383376 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zj2nz\" (UniqueName: \"kubernetes.io/projected/2e2073c6-124a-4449-88c9-eea21832a7d9-kube-api-access-zj2nz\") pod \"2e2073c6-124a-4449-88c9-eea21832a7d9\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.383549 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-catalog-content\") pod \"2e2073c6-124a-4449-88c9-eea21832a7d9\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.383629 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-utilities\") pod \"2e2073c6-124a-4449-88c9-eea21832a7d9\" (UID: \"2e2073c6-124a-4449-88c9-eea21832a7d9\") " Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.384678 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-utilities" (OuterVolumeSpecName: "utilities") pod "2e2073c6-124a-4449-88c9-eea21832a7d9" (UID: "2e2073c6-124a-4449-88c9-eea21832a7d9"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.390762 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e2073c6-124a-4449-88c9-eea21832a7d9-kube-api-access-zj2nz" (OuterVolumeSpecName: "kube-api-access-zj2nz") pod "2e2073c6-124a-4449-88c9-eea21832a7d9" (UID: "2e2073c6-124a-4449-88c9-eea21832a7d9"). InnerVolumeSpecName "kube-api-access-zj2nz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.433878 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e2073c6-124a-4449-88c9-eea21832a7d9" (UID: "2e2073c6-124a-4449-88c9-eea21832a7d9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.438268 4940 scope.go:117] "RemoveContainer" containerID="cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.445952 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.467794 4940 scope.go:117] "RemoveContainer" containerID="bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.485809 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.485842 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zj2nz\" (UniqueName: \"kubernetes.io/projected/2e2073c6-124a-4449-88c9-eea21832a7d9-kube-api-access-zj2nz\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.485854 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e2073c6-124a-4449-88c9-eea21832a7d9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.490935 4940 scope.go:117] "RemoveContainer" containerID="5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2" Nov 26 08:11:21 crc kubenswrapper[4940]: E1126 08:11:21.491404 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2\": container with ID starting with 5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2 not found: ID does not exist" containerID="5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2" Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.491450 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2"} err="failed to get container status \"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2\": rpc error: code = NotFound desc = could not find container \"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2\": container with ID starting with 
Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.491450 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2"} err="failed to get container status \"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2\": rpc error: code = NotFound desc = could not find container \"5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2\": container with ID starting with 5bef53b5986f37ea045402221ad0b4d21894af314e135a9f8cd2d846ebf74cf2 not found: ID does not exist"
Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.491480 4940 scope.go:117] "RemoveContainer" containerID="cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d"
Nov 26 08:11:21 crc kubenswrapper[4940]: E1126 08:11:21.491913 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d\": container with ID starting with cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d not found: ID does not exist" containerID="cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d"
Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.491955 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d"} err="failed to get container status \"cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d\": rpc error: code = NotFound desc = could not find container \"cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d\": container with ID starting with cc3e9877fdf5cf42979f472739fafc352bf49b56980ffa34be8c5d95dcf24c4d not found: ID does not exist"
Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.491974 4940 scope.go:117] "RemoveContainer" containerID="bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905"
Nov 26 08:11:21 crc kubenswrapper[4940]: E1126 08:11:21.492275 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905\": container with ID starting with bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905 not found: ID does not exist" containerID="bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905"
Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.492307 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905"} err="failed to get container status \"bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905\": rpc error: code = NotFound desc = could not find container \"bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905\": container with ID starting with bbc336627ead8f091b4fef63b5b320bd820677e2bc24747621bb4ccbb9682905 not found: ID does not exist"
Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.715643 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gtcg2"]
Nov 26 08:11:21 crc kubenswrapper[4940]: I1126 08:11:21.720958 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gtcg2"]
Nov 26 08:11:23 crc kubenswrapper[4940]: I1126 08:11:23.173680 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" path="/var/lib/kubelet/pods/2e2073c6-124a-4449-88c9-eea21832a7d9/volumes"
Nov 26 08:11:23 crc kubenswrapper[4940]: I1126 08:11:23.431559 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qql2w"]
podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerName="registry-server" containerID="cri-o://403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9" gracePeriod=2 Nov 26 08:11:24 crc kubenswrapper[4940]: I1126 08:11:24.903711 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.052180 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-catalog-content\") pod \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.052309 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-utilities\") pod \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.052381 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w46fg\" (UniqueName: \"kubernetes.io/projected/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-kube-api-access-w46fg\") pod \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\" (UID: \"151e2fdb-89cc-4686-88c8-1fefe3e42c0b\") " Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.054065 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-utilities" (OuterVolumeSpecName: "utilities") pod "151e2fdb-89cc-4686-88c8-1fefe3e42c0b" (UID: "151e2fdb-89cc-4686-88c8-1fefe3e42c0b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.058181 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-kube-api-access-w46fg" (OuterVolumeSpecName: "kube-api-access-w46fg") pod "151e2fdb-89cc-4686-88c8-1fefe3e42c0b" (UID: "151e2fdb-89cc-4686-88c8-1fefe3e42c0b"). InnerVolumeSpecName "kube-api-access-w46fg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.078665 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "151e2fdb-89cc-4686-88c8-1fefe3e42c0b" (UID: "151e2fdb-89cc-4686-88c8-1fefe3e42c0b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.153910 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.153964 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w46fg\" (UniqueName: \"kubernetes.io/projected/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-kube-api-access-w46fg\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.153977 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/151e2fdb-89cc-4686-88c8-1fefe3e42c0b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.429491 4940 generic.go:334] "Generic (PLEG): container finished" podID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerID="403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9" exitCode=0 Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.429549 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qql2w" event={"ID":"151e2fdb-89cc-4686-88c8-1fefe3e42c0b","Type":"ContainerDied","Data":"403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9"} Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.429860 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qql2w" event={"ID":"151e2fdb-89cc-4686-88c8-1fefe3e42c0b","Type":"ContainerDied","Data":"47d2316e84e4a7d4841f7d27cc257fb59d858bb6f64bb5263b317360c42c93db"} Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.429661 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qql2w" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.429887 4940 scope.go:117] "RemoveContainer" containerID="403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.464395 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qql2w"] Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.470592 4940 scope.go:117] "RemoveContainer" containerID="84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.471655 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qql2w"] Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.505329 4940 scope.go:117] "RemoveContainer" containerID="defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.535056 4940 scope.go:117] "RemoveContainer" containerID="403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9" Nov 26 08:11:25 crc kubenswrapper[4940]: E1126 08:11:25.535652 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9\": container with ID starting with 403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9 not found: ID does not exist" containerID="403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.535725 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9"} err="failed to get container status \"403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9\": rpc error: code = NotFound desc = could not find container \"403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9\": container with ID starting with 403a913146810a4c172a4e0596d4c266e072a6cc698beaf33b3e79d298278de9 not found: ID does not exist" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.535781 4940 scope.go:117] "RemoveContainer" containerID="84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab" Nov 26 08:11:25 crc kubenswrapper[4940]: E1126 08:11:25.536195 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab\": container with ID starting with 84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab not found: ID does not exist" containerID="84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.536233 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab"} err="failed to get container status \"84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab\": rpc error: code = NotFound desc = could not find container \"84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab\": container with ID starting with 84ec5730d9ca87f38acfb11fcf58112de6011b9a8d1d67d6497a4946964d2aab not found: ID does not exist" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.536264 4940 scope.go:117] "RemoveContainer" 
containerID="defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15" Nov 26 08:11:25 crc kubenswrapper[4940]: E1126 08:11:25.536759 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15\": container with ID starting with defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15 not found: ID does not exist" containerID="defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15" Nov 26 08:11:25 crc kubenswrapper[4940]: I1126 08:11:25.536807 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15"} err="failed to get container status \"defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15\": rpc error: code = NotFound desc = could not find container \"defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15\": container with ID starting with defc686ec1555c232d51b494dd25897f8f02564dc1f7aae58c33ccf0c6924c15 not found: ID does not exist" Nov 26 08:11:27 crc kubenswrapper[4940]: I1126 08:11:27.174823 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" path="/var/lib/kubelet/pods/151e2fdb-89cc-4686-88c8-1fefe3e42c0b/volumes" Nov 26 08:11:29 crc kubenswrapper[4940]: I1126 08:11:29.170165 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:11:29 crc kubenswrapper[4940]: E1126 08:11:29.170695 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:11:42 crc kubenswrapper[4940]: I1126 08:11:42.165581 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:11:42 crc kubenswrapper[4940]: E1126 08:11:42.166475 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:11:57 crc kubenswrapper[4940]: I1126 08:11:57.166668 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:11:57 crc kubenswrapper[4940]: E1126 08:11:57.168206 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:12:09 crc kubenswrapper[4940]: I1126 08:12:09.169841 4940 scope.go:117] "RemoveContainer" 
containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:12:09 crc kubenswrapper[4940]: E1126 08:12:09.170553 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:12:21 crc kubenswrapper[4940]: I1126 08:12:21.169542 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:12:21 crc kubenswrapper[4940]: E1126 08:12:21.170522 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:12:32 crc kubenswrapper[4940]: I1126 08:12:32.167134 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:12:32 crc kubenswrapper[4940]: E1126 08:12:32.168448 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:12:46 crc kubenswrapper[4940]: I1126 08:12:46.164879 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:12:46 crc kubenswrapper[4940]: E1126 08:12:46.165611 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:13:01 crc kubenswrapper[4940]: I1126 08:13:01.165777 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:13:01 crc kubenswrapper[4940]: E1126 08:13:01.166530 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:13:13 crc kubenswrapper[4940]: I1126 08:13:13.165821 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:13:13 crc kubenswrapper[4940]: E1126 08:13:13.166489 4940 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:13:27 crc kubenswrapper[4940]: I1126 08:13:27.166232 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:13:27 crc kubenswrapper[4940]: E1126 08:13:27.167797 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:13:42 crc kubenswrapper[4940]: I1126 08:13:42.165403 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:13:42 crc kubenswrapper[4940]: E1126 08:13:42.166090 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:13:54 crc kubenswrapper[4940]: I1126 08:13:54.165885 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:13:54 crc kubenswrapper[4940]: E1126 08:13:54.166728 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:14:06 crc kubenswrapper[4940]: I1126 08:14:06.166100 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:14:06 crc kubenswrapper[4940]: E1126 08:14:06.166813 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:14:18 crc kubenswrapper[4940]: I1126 08:14:18.165717 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:14:18 crc kubenswrapper[4940]: E1126 08:14:18.166489 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:14:31 crc kubenswrapper[4940]: I1126 08:14:31.166521 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:14:31 crc kubenswrapper[4940]: E1126 08:14:31.167324 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.563514 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b995f"] Nov 26 08:14:35 crc kubenswrapper[4940]: E1126 08:14:35.564557 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerName="registry-server" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.564584 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerName="registry-server" Nov 26 08:14:35 crc kubenswrapper[4940]: E1126 08:14:35.564608 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerName="registry-server" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.564621 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerName="registry-server" Nov 26 08:14:35 crc kubenswrapper[4940]: E1126 08:14:35.564654 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerName="extract-content" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.564668 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerName="extract-content" Nov 26 08:14:35 crc kubenswrapper[4940]: E1126 08:14:35.564692 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerName="extract-content" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.564706 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerName="extract-content" Nov 26 08:14:35 crc kubenswrapper[4940]: E1126 08:14:35.564734 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerName="extract-utilities" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.564746 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerName="extract-utilities" Nov 26 08:14:35 crc kubenswrapper[4940]: E1126 08:14:35.564787 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerName="extract-utilities" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.564800 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerName="extract-utilities" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.565109 4940 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="2e2073c6-124a-4449-88c9-eea21832a7d9" containerName="registry-server" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.565130 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="151e2fdb-89cc-4686-88c8-1fefe3e42c0b" containerName="registry-server" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.566979 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.580077 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b995f"] Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.612107 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-catalog-content\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.612445 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-utilities\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.612585 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djxw6\" (UniqueName: \"kubernetes.io/projected/a7247bda-a0ef-4d33-b90f-6e842e611c9e-kube-api-access-djxw6\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.713944 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djxw6\" (UniqueName: \"kubernetes.io/projected/a7247bda-a0ef-4d33-b90f-6e842e611c9e-kube-api-access-djxw6\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.714049 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-catalog-content\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.714086 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-utilities\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.714638 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-utilities\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 
08:14:35.714814 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-catalog-content\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.734794 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djxw6\" (UniqueName: \"kubernetes.io/projected/a7247bda-a0ef-4d33-b90f-6e842e611c9e-kube-api-access-djxw6\") pod \"community-operators-b995f\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:35 crc kubenswrapper[4940]: I1126 08:14:35.900624 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:36 crc kubenswrapper[4940]: I1126 08:14:36.386010 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b995f"] Nov 26 08:14:36 crc kubenswrapper[4940]: W1126 08:14:36.689921 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda7247bda_a0ef_4d33_b90f_6e842e611c9e.slice/crio-9d17e93207a07e39e64d6819412dab535df67918dd6edf7497f8523b9972b07a WatchSource:0}: Error finding container 9d17e93207a07e39e64d6819412dab535df67918dd6edf7497f8523b9972b07a: Status 404 returned error can't find the container with id 9d17e93207a07e39e64d6819412dab535df67918dd6edf7497f8523b9972b07a Nov 26 08:14:37 crc kubenswrapper[4940]: I1126 08:14:37.115317 4940 generic.go:334] "Generic (PLEG): container finished" podID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerID="ea894a99400fa51f1b6c8afedf15892a0bc1f3c56d78b3f5229539de7bda6c8d" exitCode=0 Nov 26 08:14:37 crc kubenswrapper[4940]: I1126 08:14:37.115462 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b995f" event={"ID":"a7247bda-a0ef-4d33-b90f-6e842e611c9e","Type":"ContainerDied","Data":"ea894a99400fa51f1b6c8afedf15892a0bc1f3c56d78b3f5229539de7bda6c8d"} Nov 26 08:14:37 crc kubenswrapper[4940]: I1126 08:14:37.115720 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b995f" event={"ID":"a7247bda-a0ef-4d33-b90f-6e842e611c9e","Type":"ContainerStarted","Data":"9d17e93207a07e39e64d6819412dab535df67918dd6edf7497f8523b9972b07a"} Nov 26 08:14:37 crc kubenswrapper[4940]: I1126 08:14:37.119276 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:14:38 crc kubenswrapper[4940]: I1126 08:14:38.131760 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b995f" event={"ID":"a7247bda-a0ef-4d33-b90f-6e842e611c9e","Type":"ContainerStarted","Data":"929370e9b8825b47ba492bf93125442453a1db3ae5c14ce12d5f2dc2dd4fe052"} Nov 26 08:14:39 crc kubenswrapper[4940]: I1126 08:14:39.142856 4940 generic.go:334] "Generic (PLEG): container finished" podID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerID="929370e9b8825b47ba492bf93125442453a1db3ae5c14ce12d5f2dc2dd4fe052" exitCode=0 Nov 26 08:14:39 crc kubenswrapper[4940]: I1126 08:14:39.142916 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b995f" 
event={"ID":"a7247bda-a0ef-4d33-b90f-6e842e611c9e","Type":"ContainerDied","Data":"929370e9b8825b47ba492bf93125442453a1db3ae5c14ce12d5f2dc2dd4fe052"} Nov 26 08:14:40 crc kubenswrapper[4940]: I1126 08:14:40.157427 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b995f" event={"ID":"a7247bda-a0ef-4d33-b90f-6e842e611c9e","Type":"ContainerStarted","Data":"df50db4fe43fb553813c942040b3cce7d6377920d5e98c97f39074af121f58d3"} Nov 26 08:14:40 crc kubenswrapper[4940]: I1126 08:14:40.180618 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b995f" podStartSLOduration=2.721720617 podStartE2EDuration="5.180596723s" podCreationTimestamp="2025-11-26 08:14:35 +0000 UTC" firstStartedPulling="2025-11-26 08:14:37.118878512 +0000 UTC m=+4778.639020141" lastFinishedPulling="2025-11-26 08:14:39.577754608 +0000 UTC m=+4781.097896247" observedRunningTime="2025-11-26 08:14:40.178267638 +0000 UTC m=+4781.698409307" watchObservedRunningTime="2025-11-26 08:14:40.180596723 +0000 UTC m=+4781.700738352" Nov 26 08:14:44 crc kubenswrapper[4940]: I1126 08:14:44.166066 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:14:44 crc kubenswrapper[4940]: E1126 08:14:44.166559 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:14:45 crc kubenswrapper[4940]: I1126 08:14:45.901537 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:45 crc kubenswrapper[4940]: I1126 08:14:45.901612 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:45 crc kubenswrapper[4940]: I1126 08:14:45.988119 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:46 crc kubenswrapper[4940]: I1126 08:14:46.258251 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:46 crc kubenswrapper[4940]: I1126 08:14:46.303610 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b995f"] Nov 26 08:14:48 crc kubenswrapper[4940]: I1126 08:14:48.228939 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b995f" podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerName="registry-server" containerID="cri-o://df50db4fe43fb553813c942040b3cce7d6377920d5e98c97f39074af121f58d3" gracePeriod=2 Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.239727 4940 generic.go:334] "Generic (PLEG): container finished" podID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerID="df50db4fe43fb553813c942040b3cce7d6377920d5e98c97f39074af121f58d3" exitCode=0 Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.239807 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b995f" 
event={"ID":"a7247bda-a0ef-4d33-b90f-6e842e611c9e","Type":"ContainerDied","Data":"df50db4fe43fb553813c942040b3cce7d6377920d5e98c97f39074af121f58d3"} Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.569009 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.637357 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-utilities\") pod \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.637434 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-catalog-content\") pod \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.637485 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djxw6\" (UniqueName: \"kubernetes.io/projected/a7247bda-a0ef-4d33-b90f-6e842e611c9e-kube-api-access-djxw6\") pod \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\" (UID: \"a7247bda-a0ef-4d33-b90f-6e842e611c9e\") " Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.638755 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-utilities" (OuterVolumeSpecName: "utilities") pod "a7247bda-a0ef-4d33-b90f-6e842e611c9e" (UID: "a7247bda-a0ef-4d33-b90f-6e842e611c9e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.643932 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7247bda-a0ef-4d33-b90f-6e842e611c9e-kube-api-access-djxw6" (OuterVolumeSpecName: "kube-api-access-djxw6") pod "a7247bda-a0ef-4d33-b90f-6e842e611c9e" (UID: "a7247bda-a0ef-4d33-b90f-6e842e611c9e"). InnerVolumeSpecName "kube-api-access-djxw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.703912 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7247bda-a0ef-4d33-b90f-6e842e611c9e" (UID: "a7247bda-a0ef-4d33-b90f-6e842e611c9e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.738749 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.738782 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djxw6\" (UniqueName: \"kubernetes.io/projected/a7247bda-a0ef-4d33-b90f-6e842e611c9e-kube-api-access-djxw6\") on node \"crc\" DevicePath \"\"" Nov 26 08:14:49 crc kubenswrapper[4940]: I1126 08:14:49.738801 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7247bda-a0ef-4d33-b90f-6e842e611c9e-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:14:50 crc kubenswrapper[4940]: I1126 08:14:50.254385 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b995f" event={"ID":"a7247bda-a0ef-4d33-b90f-6e842e611c9e","Type":"ContainerDied","Data":"9d17e93207a07e39e64d6819412dab535df67918dd6edf7497f8523b9972b07a"} Nov 26 08:14:50 crc kubenswrapper[4940]: I1126 08:14:50.254442 4940 scope.go:117] "RemoveContainer" containerID="df50db4fe43fb553813c942040b3cce7d6377920d5e98c97f39074af121f58d3" Nov 26 08:14:50 crc kubenswrapper[4940]: I1126 08:14:50.254579 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b995f" Nov 26 08:14:50 crc kubenswrapper[4940]: I1126 08:14:50.294199 4940 scope.go:117] "RemoveContainer" containerID="929370e9b8825b47ba492bf93125442453a1db3ae5c14ce12d5f2dc2dd4fe052" Nov 26 08:14:50 crc kubenswrapper[4940]: I1126 08:14:50.297627 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b995f"] Nov 26 08:14:50 crc kubenswrapper[4940]: I1126 08:14:50.311445 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b995f"] Nov 26 08:14:50 crc kubenswrapper[4940]: I1126 08:14:50.319483 4940 scope.go:117] "RemoveContainer" containerID="ea894a99400fa51f1b6c8afedf15892a0bc1f3c56d78b3f5229539de7bda6c8d" Nov 26 08:14:51 crc kubenswrapper[4940]: I1126 08:14:51.176362 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" path="/var/lib/kubelet/pods/a7247bda-a0ef-4d33-b90f-6e842e611c9e/volumes" Nov 26 08:14:58 crc kubenswrapper[4940]: I1126 08:14:58.165948 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d" Nov 26 08:14:58 crc kubenswrapper[4940]: I1126 08:14:58.336513 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"172ae1663b76c995626df76eac8bea676d78474fe1219d39d4996d31e4d42e86"} Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.150278 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9"] Nov 26 08:15:00 crc kubenswrapper[4940]: E1126 08:15:00.151311 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerName="extract-utilities" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.151336 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerName="extract-utilities" Nov 26 08:15:00 crc kubenswrapper[4940]: E1126 08:15:00.151382 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerName="extract-content" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.151393 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerName="extract-content" Nov 26 08:15:00 crc kubenswrapper[4940]: E1126 08:15:00.151425 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerName="registry-server" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.151434 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerName="registry-server" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.151665 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7247bda-a0ef-4d33-b90f-6e842e611c9e" containerName="registry-server" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.152505 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.155360 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.156683 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.159252 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9"] Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.297822 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btmx7\" (UniqueName: \"kubernetes.io/projected/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-kube-api-access-btmx7\") pod \"collect-profiles-29402415-w8vq9\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.298262 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-config-volume\") pod \"collect-profiles-29402415-w8vq9\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.298504 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-secret-volume\") pod \"collect-profiles-29402415-w8vq9\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.400221 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-secret-volume\") pod \"collect-profiles-29402415-w8vq9\" (UID: 
\"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.400331 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btmx7\" (UniqueName: \"kubernetes.io/projected/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-kube-api-access-btmx7\") pod \"collect-profiles-29402415-w8vq9\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.400387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-config-volume\") pod \"collect-profiles-29402415-w8vq9\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.403232 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-config-volume\") pod \"collect-profiles-29402415-w8vq9\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.406737 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-secret-volume\") pod \"collect-profiles-29402415-w8vq9\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.418011 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btmx7\" (UniqueName: \"kubernetes.io/projected/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-kube-api-access-btmx7\") pod \"collect-profiles-29402415-w8vq9\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.512740 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:00 crc kubenswrapper[4940]: I1126 08:15:00.928516 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9"] Nov 26 08:15:00 crc kubenswrapper[4940]: W1126 08:15:00.985657 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod96dbb1d2_519a_44b5_a49e_fc94031dd3e8.slice/crio-bc0b019e4c4ac5afa2791300b231dcb9f9c46c0bb0c95ad74d88358dd37a012f WatchSource:0}: Error finding container bc0b019e4c4ac5afa2791300b231dcb9f9c46c0bb0c95ad74d88358dd37a012f: Status 404 returned error can't find the container with id bc0b019e4c4ac5afa2791300b231dcb9f9c46c0bb0c95ad74d88358dd37a012f Nov 26 08:15:01 crc kubenswrapper[4940]: I1126 08:15:01.362667 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" event={"ID":"96dbb1d2-519a-44b5-a49e-fc94031dd3e8","Type":"ContainerStarted","Data":"892e5fda5519e49768e5c5628cc8e72bd1d761010dd5ae9fcc0ad310fab29b89"} Nov 26 08:15:01 crc kubenswrapper[4940]: I1126 08:15:01.363005 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" event={"ID":"96dbb1d2-519a-44b5-a49e-fc94031dd3e8","Type":"ContainerStarted","Data":"bc0b019e4c4ac5afa2791300b231dcb9f9c46c0bb0c95ad74d88358dd37a012f"} Nov 26 08:15:01 crc kubenswrapper[4940]: I1126 08:15:01.381212 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" podStartSLOduration=1.381195414 podStartE2EDuration="1.381195414s" podCreationTimestamp="2025-11-26 08:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:15:01.376395392 +0000 UTC m=+4802.896537011" watchObservedRunningTime="2025-11-26 08:15:01.381195414 +0000 UTC m=+4802.901337033" Nov 26 08:15:02 crc kubenswrapper[4940]: I1126 08:15:02.376616 4940 generic.go:334] "Generic (PLEG): container finished" podID="96dbb1d2-519a-44b5-a49e-fc94031dd3e8" containerID="892e5fda5519e49768e5c5628cc8e72bd1d761010dd5ae9fcc0ad310fab29b89" exitCode=0 Nov 26 08:15:02 crc kubenswrapper[4940]: I1126 08:15:02.376722 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" event={"ID":"96dbb1d2-519a-44b5-a49e-fc94031dd3e8","Type":"ContainerDied","Data":"892e5fda5519e49768e5c5628cc8e72bd1d761010dd5ae9fcc0ad310fab29b89"} Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.646508 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.748656 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-config-volume\") pod \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.748862 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-secret-volume\") pod \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.748915 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btmx7\" (UniqueName: \"kubernetes.io/projected/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-kube-api-access-btmx7\") pod \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\" (UID: \"96dbb1d2-519a-44b5-a49e-fc94031dd3e8\") " Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.749479 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-config-volume" (OuterVolumeSpecName: "config-volume") pod "96dbb1d2-519a-44b5-a49e-fc94031dd3e8" (UID: "96dbb1d2-519a-44b5-a49e-fc94031dd3e8"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.755236 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "96dbb1d2-519a-44b5-a49e-fc94031dd3e8" (UID: "96dbb1d2-519a-44b5-a49e-fc94031dd3e8"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.755318 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-kube-api-access-btmx7" (OuterVolumeSpecName: "kube-api-access-btmx7") pod "96dbb1d2-519a-44b5-a49e-fc94031dd3e8" (UID: "96dbb1d2-519a-44b5-a49e-fc94031dd3e8"). InnerVolumeSpecName "kube-api-access-btmx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.851177 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.851226 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:15:03 crc kubenswrapper[4940]: I1126 08:15:03.851236 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btmx7\" (UniqueName: \"kubernetes.io/projected/96dbb1d2-519a-44b5-a49e-fc94031dd3e8-kube-api-access-btmx7\") on node \"crc\" DevicePath \"\"" Nov 26 08:15:04 crc kubenswrapper[4940]: I1126 08:15:04.400061 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" Nov 26 08:15:04 crc kubenswrapper[4940]: I1126 08:15:04.400140 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9" event={"ID":"96dbb1d2-519a-44b5-a49e-fc94031dd3e8","Type":"ContainerDied","Data":"bc0b019e4c4ac5afa2791300b231dcb9f9c46c0bb0c95ad74d88358dd37a012f"} Nov 26 08:15:04 crc kubenswrapper[4940]: I1126 08:15:04.400249 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc0b019e4c4ac5afa2791300b231dcb9f9c46c0bb0c95ad74d88358dd37a012f" Nov 26 08:15:04 crc kubenswrapper[4940]: I1126 08:15:04.447619 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"] Nov 26 08:15:04 crc kubenswrapper[4940]: I1126 08:15:04.457308 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-vpbg9"] Nov 26 08:15:05 crc kubenswrapper[4940]: I1126 08:15:05.180887 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa5c8609-c89c-4674-9bae-9ef14ddca001" path="/var/lib/kubelet/pods/aa5c8609-c89c-4674-9bae-9ef14ddca001/volumes" Nov 26 08:15:08 crc kubenswrapper[4940]: I1126 08:15:08.814501 4940 scope.go:117] "RemoveContainer" containerID="641c47ea18b8b04f5d88a4eec1f14583507dfc7d925eda34dd58d0f2353b1d31" Nov 26 08:17:21 crc kubenswrapper[4940]: I1126 08:17:21.728499 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:17:21 crc kubenswrapper[4940]: I1126 08:17:21.729004 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:17:51 crc kubenswrapper[4940]: I1126 08:17:51.728514 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:17:51 crc kubenswrapper[4940]: I1126 08:17:51.729018 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.310992 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tbwl2"] Nov 26 08:18:05 crc kubenswrapper[4940]: E1126 08:18:05.311913 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96dbb1d2-519a-44b5-a49e-fc94031dd3e8" containerName="collect-profiles" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.311929 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="96dbb1d2-519a-44b5-a49e-fc94031dd3e8" containerName="collect-profiles" Nov 26 08:18:05 
crc kubenswrapper[4940]: I1126 08:18:05.312185 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="96dbb1d2-519a-44b5-a49e-fc94031dd3e8" containerName="collect-profiles" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.313708 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.367449 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tbwl2"] Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.413451 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzddb\" (UniqueName: \"kubernetes.io/projected/d8dc6a5b-21e4-4694-a856-e135e154680d-kube-api-access-rzddb\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.413522 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-utilities\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.413553 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-catalog-content\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.515415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-utilities\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.515468 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-catalog-content\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.515541 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzddb\" (UniqueName: \"kubernetes.io/projected/d8dc6a5b-21e4-4694-a856-e135e154680d-kube-api-access-rzddb\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.516258 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-utilities\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.516285 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-catalog-content\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.537519 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzddb\" (UniqueName: \"kubernetes.io/projected/d8dc6a5b-21e4-4694-a856-e135e154680d-kube-api-access-rzddb\") pod \"redhat-operators-tbwl2\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:05 crc kubenswrapper[4940]: I1126 08:18:05.663590 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:06 crc kubenswrapper[4940]: I1126 08:18:06.097205 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tbwl2"] Nov 26 08:18:06 crc kubenswrapper[4940]: I1126 08:18:06.544698 4940 generic.go:334] "Generic (PLEG): container finished" podID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerID="6f733eab70152f1c3cbcb5e2cb7181db0bbca6f955db55704919cce99e4a5588" exitCode=0 Nov 26 08:18:06 crc kubenswrapper[4940]: I1126 08:18:06.544773 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbwl2" event={"ID":"d8dc6a5b-21e4-4694-a856-e135e154680d","Type":"ContainerDied","Data":"6f733eab70152f1c3cbcb5e2cb7181db0bbca6f955db55704919cce99e4a5588"} Nov 26 08:18:06 crc kubenswrapper[4940]: I1126 08:18:06.545064 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbwl2" event={"ID":"d8dc6a5b-21e4-4694-a856-e135e154680d","Type":"ContainerStarted","Data":"aa222a60bf21866f892b8fd408b6f3caa6469aef6ad72e86618cbdd1f0493a3d"} Nov 26 08:18:07 crc kubenswrapper[4940]: I1126 08:18:07.555969 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbwl2" event={"ID":"d8dc6a5b-21e4-4694-a856-e135e154680d","Type":"ContainerStarted","Data":"50550d6e2381c458d1e3beeac9fcad4fa69b23e4535fee0d997914e905db8718"} Nov 26 08:18:08 crc kubenswrapper[4940]: I1126 08:18:08.570746 4940 generic.go:334] "Generic (PLEG): container finished" podID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerID="50550d6e2381c458d1e3beeac9fcad4fa69b23e4535fee0d997914e905db8718" exitCode=0 Nov 26 08:18:08 crc kubenswrapper[4940]: I1126 08:18:08.570868 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbwl2" event={"ID":"d8dc6a5b-21e4-4694-a856-e135e154680d","Type":"ContainerDied","Data":"50550d6e2381c458d1e3beeac9fcad4fa69b23e4535fee0d997914e905db8718"} Nov 26 08:18:09 crc kubenswrapper[4940]: I1126 08:18:09.581461 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbwl2" event={"ID":"d8dc6a5b-21e4-4694-a856-e135e154680d","Type":"ContainerStarted","Data":"465cdad21008e107331b12ef883a19517e4ea202e42d70b8d4d3f3644a70835d"} Nov 26 08:18:09 crc kubenswrapper[4940]: I1126 08:18:09.602624 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tbwl2" podStartSLOduration=2.164962419 podStartE2EDuration="4.602605831s" podCreationTimestamp="2025-11-26 08:18:05 +0000 UTC" firstStartedPulling="2025-11-26 08:18:06.546714204 +0000 UTC m=+4988.066855843" lastFinishedPulling="2025-11-26 08:18:08.984357636 +0000 UTC m=+4990.504499255" 
observedRunningTime="2025-11-26 08:18:09.596239158 +0000 UTC m=+4991.116380787" watchObservedRunningTime="2025-11-26 08:18:09.602605831 +0000 UTC m=+4991.122747450" Nov 26 08:18:15 crc kubenswrapper[4940]: I1126 08:18:15.664437 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:15 crc kubenswrapper[4940]: I1126 08:18:15.665201 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:15 crc kubenswrapper[4940]: I1126 08:18:15.713312 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:16 crc kubenswrapper[4940]: I1126 08:18:16.705204 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:16 crc kubenswrapper[4940]: I1126 08:18:16.751193 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tbwl2"] Nov 26 08:18:18 crc kubenswrapper[4940]: I1126 08:18:18.652097 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tbwl2" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerName="registry-server" containerID="cri-o://465cdad21008e107331b12ef883a19517e4ea202e42d70b8d4d3f3644a70835d" gracePeriod=2 Nov 26 08:18:19 crc kubenswrapper[4940]: I1126 08:18:19.661300 4940 generic.go:334] "Generic (PLEG): container finished" podID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerID="465cdad21008e107331b12ef883a19517e4ea202e42d70b8d4d3f3644a70835d" exitCode=0 Nov 26 08:18:19 crc kubenswrapper[4940]: I1126 08:18:19.661356 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbwl2" event={"ID":"d8dc6a5b-21e4-4694-a856-e135e154680d","Type":"ContainerDied","Data":"465cdad21008e107331b12ef883a19517e4ea202e42d70b8d4d3f3644a70835d"} Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.205034 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.319723 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-utilities\") pod \"d8dc6a5b-21e4-4694-a856-e135e154680d\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.319807 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-catalog-content\") pod \"d8dc6a5b-21e4-4694-a856-e135e154680d\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.319911 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzddb\" (UniqueName: \"kubernetes.io/projected/d8dc6a5b-21e4-4694-a856-e135e154680d-kube-api-access-rzddb\") pod \"d8dc6a5b-21e4-4694-a856-e135e154680d\" (UID: \"d8dc6a5b-21e4-4694-a856-e135e154680d\") " Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.320798 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-utilities" (OuterVolumeSpecName: "utilities") pod "d8dc6a5b-21e4-4694-a856-e135e154680d" (UID: "d8dc6a5b-21e4-4694-a856-e135e154680d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.326164 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8dc6a5b-21e4-4694-a856-e135e154680d-kube-api-access-rzddb" (OuterVolumeSpecName: "kube-api-access-rzddb") pod "d8dc6a5b-21e4-4694-a856-e135e154680d" (UID: "d8dc6a5b-21e4-4694-a856-e135e154680d"). InnerVolumeSpecName "kube-api-access-rzddb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.410698 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8dc6a5b-21e4-4694-a856-e135e154680d" (UID: "d8dc6a5b-21e4-4694-a856-e135e154680d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.421771 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzddb\" (UniqueName: \"kubernetes.io/projected/d8dc6a5b-21e4-4694-a856-e135e154680d-kube-api-access-rzddb\") on node \"crc\" DevicePath \"\"" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.421807 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.421817 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8dc6a5b-21e4-4694-a856-e135e154680d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.672077 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbwl2" event={"ID":"d8dc6a5b-21e4-4694-a856-e135e154680d","Type":"ContainerDied","Data":"aa222a60bf21866f892b8fd408b6f3caa6469aef6ad72e86618cbdd1f0493a3d"} Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.672174 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tbwl2" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.673077 4940 scope.go:117] "RemoveContainer" containerID="465cdad21008e107331b12ef883a19517e4ea202e42d70b8d4d3f3644a70835d" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.695792 4940 scope.go:117] "RemoveContainer" containerID="50550d6e2381c458d1e3beeac9fcad4fa69b23e4535fee0d997914e905db8718" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.710806 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tbwl2"] Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.717925 4940 scope.go:117] "RemoveContainer" containerID="6f733eab70152f1c3cbcb5e2cb7181db0bbca6f955db55704919cce99e4a5588" Nov 26 08:18:20 crc kubenswrapper[4940]: I1126 08:18:20.718899 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tbwl2"] Nov 26 08:18:21 crc kubenswrapper[4940]: I1126 08:18:21.173871 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" path="/var/lib/kubelet/pods/d8dc6a5b-21e4-4694-a856-e135e154680d/volumes" Nov 26 08:18:21 crc kubenswrapper[4940]: I1126 08:18:21.728671 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:18:21 crc kubenswrapper[4940]: I1126 08:18:21.729066 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:18:21 crc kubenswrapper[4940]: I1126 08:18:21.729139 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 08:18:21 crc kubenswrapper[4940]: I1126 08:18:21.729872 4940 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"172ae1663b76c995626df76eac8bea676d78474fe1219d39d4996d31e4d42e86"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 08:18:21 crc kubenswrapper[4940]: I1126 08:18:21.729968 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://172ae1663b76c995626df76eac8bea676d78474fe1219d39d4996d31e4d42e86" gracePeriod=600
Nov 26 08:18:22 crc kubenswrapper[4940]: I1126 08:18:22.690892 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="172ae1663b76c995626df76eac8bea676d78474fe1219d39d4996d31e4d42e86" exitCode=0
Nov 26 08:18:22 crc kubenswrapper[4940]: I1126 08:18:22.690925 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"172ae1663b76c995626df76eac8bea676d78474fe1219d39d4996d31e4d42e86"}
Nov 26 08:18:22 crc kubenswrapper[4940]: I1126 08:18:22.691224 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"}
Nov 26 08:18:22 crc kubenswrapper[4940]: I1126 08:18:22.691244 4940 scope.go:117] "RemoveContainer" containerID="8ca0053f3ce00a4e19f64d0f127be7e9123ef1262e60193f1f3a358926de379d"
Nov 26 08:20:51 crc kubenswrapper[4940]: I1126 08:20:51.728610 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:20:51 crc kubenswrapper[4940]: I1126 08:20:51.729355 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:21:21 crc kubenswrapper[4940]: I1126 08:21:21.728937 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:21:21 crc kubenswrapper[4940]: I1126 08:21:21.729457 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.142413 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tftxl"]
Nov 26 08:21:27 crc kubenswrapper[4940]: E1126 08:21:27.143879 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerName="registry-server"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.143905 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerName="registry-server"
Nov 26 08:21:27 crc kubenswrapper[4940]: E1126 08:21:27.143943 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerName="extract-content"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.143954 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerName="extract-content"
Nov 26 08:21:27 crc kubenswrapper[4940]: E1126 08:21:27.143973 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerName="extract-utilities"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.143981 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerName="extract-utilities"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.144193 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8dc6a5b-21e4-4694-a856-e135e154680d" containerName="registry-server"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.145340 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.152111 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tftxl"]
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.253108 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-catalog-content\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.253186 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlm2r\" (UniqueName: \"kubernetes.io/projected/1a4cae25-783a-41f4-ac2c-a97a07023d6e-kube-api-access-hlm2r\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.253223 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-utilities\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.354899 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlm2r\" (UniqueName: \"kubernetes.io/projected/1a4cae25-783a-41f4-ac2c-a97a07023d6e-kube-api-access-hlm2r\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.355266 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-utilities\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.355480 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-catalog-content\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.355869 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-utilities\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.356009 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-catalog-content\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.384566 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlm2r\" (UniqueName: \"kubernetes.io/projected/1a4cae25-783a-41f4-ac2c-a97a07023d6e-kube-api-access-hlm2r\") pod \"certified-operators-tftxl\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") " pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.472575 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:27 crc kubenswrapper[4940]: I1126 08:21:27.995652 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tftxl"]
Nov 26 08:21:28 crc kubenswrapper[4940]: W1126 08:21:28.001711 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a4cae25_783a_41f4_ac2c_a97a07023d6e.slice/crio-ebf99efee0961f28933f72d890b29fc20b837cb0a0a55d7ee391b1971bd0d3c9 WatchSource:0}: Error finding container ebf99efee0961f28933f72d890b29fc20b837cb0a0a55d7ee391b1971bd0d3c9: Status 404 returned error can't find the container with id ebf99efee0961f28933f72d890b29fc20b837cb0a0a55d7ee391b1971bd0d3c9
Nov 26 08:21:28 crc kubenswrapper[4940]: I1126 08:21:28.357223 4940 generic.go:334] "Generic (PLEG): container finished" podID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerID="6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599" exitCode=0
Nov 26 08:21:28 crc kubenswrapper[4940]: I1126 08:21:28.357364 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tftxl" event={"ID":"1a4cae25-783a-41f4-ac2c-a97a07023d6e","Type":"ContainerDied","Data":"6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599"}
Nov 26 08:21:28 crc kubenswrapper[4940]: I1126 08:21:28.357807 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tftxl" event={"ID":"1a4cae25-783a-41f4-ac2c-a97a07023d6e","Type":"ContainerStarted","Data":"ebf99efee0961f28933f72d890b29fc20b837cb0a0a55d7ee391b1971bd0d3c9"}
Nov 26 08:21:28 crc kubenswrapper[4940]: I1126 08:21:28.359563 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 08:21:29 crc kubenswrapper[4940]: I1126 08:21:29.366983 4940 generic.go:334] "Generic (PLEG): container finished" podID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerID="64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2" exitCode=0
Nov 26 08:21:29 crc kubenswrapper[4940]: I1126 08:21:29.367233 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tftxl" event={"ID":"1a4cae25-783a-41f4-ac2c-a97a07023d6e","Type":"ContainerDied","Data":"64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2"}
Nov 26 08:21:30 crc kubenswrapper[4940]: I1126 08:21:30.376430 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tftxl" event={"ID":"1a4cae25-783a-41f4-ac2c-a97a07023d6e","Type":"ContainerStarted","Data":"620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71"}
Nov 26 08:21:30 crc kubenswrapper[4940]: I1126 08:21:30.400900 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tftxl" podStartSLOduration=2.008305167 podStartE2EDuration="3.400885742s" podCreationTimestamp="2025-11-26 08:21:27 +0000 UTC" firstStartedPulling="2025-11-26 08:21:28.359224882 +0000 UTC m=+5189.879366501" lastFinishedPulling="2025-11-26 08:21:29.751805457 +0000 UTC m=+5191.271947076" observedRunningTime="2025-11-26 08:21:30.394342054 +0000 UTC m=+5191.914483673" watchObservedRunningTime="2025-11-26 08:21:30.400885742 +0000 UTC m=+5191.921027361"
Nov 26 08:21:37 crc kubenswrapper[4940]: I1126 08:21:37.472973 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:37 crc kubenswrapper[4940]: I1126 08:21:37.473877 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:37 crc kubenswrapper[4940]: I1126 08:21:37.546389 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:38 crc kubenswrapper[4940]: I1126 08:21:38.490280 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:38 crc kubenswrapper[4940]: I1126 08:21:38.538431 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tftxl"]
Nov 26 08:21:40 crc kubenswrapper[4940]: I1126 08:21:40.448936 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tftxl" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerName="registry-server" containerID="cri-o://620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71" gracePeriod=2
Nov 26 08:21:40 crc kubenswrapper[4940]: I1126 08:21:40.867879 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.061479 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-catalog-content\") pod \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") "
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.061985 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlm2r\" (UniqueName: \"kubernetes.io/projected/1a4cae25-783a-41f4-ac2c-a97a07023d6e-kube-api-access-hlm2r\") pod \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") "
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.062276 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-utilities\") pod \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\" (UID: \"1a4cae25-783a-41f4-ac2c-a97a07023d6e\") "
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.063308 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-utilities" (OuterVolumeSpecName: "utilities") pod "1a4cae25-783a-41f4-ac2c-a97a07023d6e" (UID: "1a4cae25-783a-41f4-ac2c-a97a07023d6e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.069282 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a4cae25-783a-41f4-ac2c-a97a07023d6e-kube-api-access-hlm2r" (OuterVolumeSpecName: "kube-api-access-hlm2r") pod "1a4cae25-783a-41f4-ac2c-a97a07023d6e" (UID: "1a4cae25-783a-41f4-ac2c-a97a07023d6e"). InnerVolumeSpecName "kube-api-access-hlm2r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.127219 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1a4cae25-783a-41f4-ac2c-a97a07023d6e" (UID: "1a4cae25-783a-41f4-ac2c-a97a07023d6e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.163410 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.163451 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlm2r\" (UniqueName: \"kubernetes.io/projected/1a4cae25-783a-41f4-ac2c-a97a07023d6e-kube-api-access-hlm2r\") on node \"crc\" DevicePath \"\""
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.163466 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a4cae25-783a-41f4-ac2c-a97a07023d6e-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.457738 4940 generic.go:334] "Generic (PLEG): container finished" podID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerID="620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71" exitCode=0
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.457796 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tftxl"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.457811 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tftxl" event={"ID":"1a4cae25-783a-41f4-ac2c-a97a07023d6e","Type":"ContainerDied","Data":"620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71"}
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.457845 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tftxl" event={"ID":"1a4cae25-783a-41f4-ac2c-a97a07023d6e","Type":"ContainerDied","Data":"ebf99efee0961f28933f72d890b29fc20b837cb0a0a55d7ee391b1971bd0d3c9"}
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.457866 4940 scope.go:117] "RemoveContainer" containerID="620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.484938 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tftxl"]
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.489551 4940 scope.go:117] "RemoveContainer" containerID="64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.491323 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tftxl"]
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.516156 4940 scope.go:117] "RemoveContainer" containerID="6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.543980 4940 scope.go:117] "RemoveContainer" containerID="620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71"
Nov 26 08:21:41 crc kubenswrapper[4940]: E1126 08:21:41.544475 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71\": container with ID starting with 620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71 not found: ID does not exist" containerID="620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.544595 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71"} err="failed to get container status \"620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71\": rpc error: code = NotFound desc = could not find container \"620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71\": container with ID starting with 620e1958b51e86acaa147d187f94335eec21e3c6f53e71c42c41031e52033b71 not found: ID does not exist"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.544678 4940 scope.go:117] "RemoveContainer" containerID="64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2"
Nov 26 08:21:41 crc kubenswrapper[4940]: E1126 08:21:41.545133 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2\": container with ID starting with 64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2 not found: ID does not exist" containerID="64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.545233 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2"} err="failed to get container status \"64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2\": rpc error: code = NotFound desc = could not find container \"64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2\": container with ID starting with 64b3c17e092e08b43fc3aa742d9ee41a6c277cb484d5923485f326b6e915bac2 not found: ID does not exist"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.545301 4940 scope.go:117] "RemoveContainer" containerID="6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599"
Nov 26 08:21:41 crc kubenswrapper[4940]: E1126 08:21:41.545680 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599\": container with ID starting with 6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599 not found: ID does not exist" containerID="6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599"
Nov 26 08:21:41 crc kubenswrapper[4940]: I1126 08:21:41.545710 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599"} err="failed to get container status \"6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599\": rpc error: code = NotFound desc = could not find container \"6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599\": container with ID starting with 6c4b8328cd071f7de891a6547d1cbe4313d342f56260da3ae12f73f2f4616599 not found: ID does not exist"
Nov 26 08:21:43 crc kubenswrapper[4940]: I1126 08:21:43.182932 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" path="/var/lib/kubelet/pods/1a4cae25-783a-41f4-ac2c-a97a07023d6e/volumes"
Nov 26 08:21:51 crc kubenswrapper[4940]: I1126 08:21:51.728468 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:21:51 crc kubenswrapper[4940]: I1126 08:21:51.729127 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:21:51 crc kubenswrapper[4940]: I1126 08:21:51.729195 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 08:21:51 crc kubenswrapper[4940]: I1126 08:21:51.729962 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 08:21:51 crc kubenswrapper[4940]: I1126 08:21:51.730092 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" gracePeriod=600
Nov 26 08:21:51 crc kubenswrapper[4940]: E1126 08:21:51.858324 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:21:52 crc kubenswrapper[4940]: I1126 08:21:52.570097 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" exitCode=0
Nov 26 08:21:52 crc kubenswrapper[4940]: I1126 08:21:52.570192 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"}
Nov 26 08:21:52 crc kubenswrapper[4940]: I1126 08:21:52.570281 4940 scope.go:117] "RemoveContainer" containerID="172ae1663b76c995626df76eac8bea676d78474fe1219d39d4996d31e4d42e86"
Nov 26 08:21:52 crc kubenswrapper[4940]: I1126 08:21:52.570881 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:21:52 crc kubenswrapper[4940]: E1126 08:21:52.571226 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.531471 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mtkw9"]
Nov 26 08:21:53 crc kubenswrapper[4940]: E1126 08:21:53.532381 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerName="registry-server"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.532407 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerName="registry-server"
Nov 26 08:21:53 crc kubenswrapper[4940]: E1126 08:21:53.532463 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerName="extract-content"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.532476 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerName="extract-content"
Nov 26 08:21:53 crc kubenswrapper[4940]: E1126 08:21:53.532493 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerName="extract-utilities"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.532507 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerName="extract-utilities"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.532768 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a4cae25-783a-41f4-ac2c-a97a07023d6e" containerName="registry-server"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.535241 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.543927 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mtkw9"]
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.550594 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-utilities\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.550694 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-catalog-content\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.550787 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpww2\" (UniqueName: \"kubernetes.io/projected/956c8cfa-a13e-47d9-aaae-dfd9c797985c-kube-api-access-xpww2\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.651582 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-utilities\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.651638 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-catalog-content\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.651682 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpww2\" (UniqueName: \"kubernetes.io/projected/956c8cfa-a13e-47d9-aaae-dfd9c797985c-kube-api-access-xpww2\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.652330 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-utilities\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.655597 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-catalog-content\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.671452 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpww2\" (UniqueName: \"kubernetes.io/projected/956c8cfa-a13e-47d9-aaae-dfd9c797985c-kube-api-access-xpww2\") pod \"redhat-marketplace-mtkw9\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") " pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:53 crc kubenswrapper[4940]: I1126 08:21:53.866559 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:21:54 crc kubenswrapper[4940]: I1126 08:21:54.117245 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mtkw9"]
Nov 26 08:21:54 crc kubenswrapper[4940]: I1126 08:21:54.605513 4940 generic.go:334] "Generic (PLEG): container finished" podID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerID="4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4" exitCode=0
Nov 26 08:21:54 crc kubenswrapper[4940]: I1126 08:21:54.605556 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mtkw9" event={"ID":"956c8cfa-a13e-47d9-aaae-dfd9c797985c","Type":"ContainerDied","Data":"4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4"}
Nov 26 08:21:54 crc kubenswrapper[4940]: I1126 08:21:54.605584 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mtkw9" event={"ID":"956c8cfa-a13e-47d9-aaae-dfd9c797985c","Type":"ContainerStarted","Data":"c85e7ef109922146a353d5a62a1619b8a9833bc02de70857c1b7773f54f9bec5"}
Nov 26 08:21:55 crc kubenswrapper[4940]: I1126 08:21:55.617733 4940 generic.go:334] "Generic (PLEG): container finished" podID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerID="0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395" exitCode=0
Nov 26 08:21:55 crc kubenswrapper[4940]: I1126 08:21:55.617842 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mtkw9" event={"ID":"956c8cfa-a13e-47d9-aaae-dfd9c797985c","Type":"ContainerDied","Data":"0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395"}
Nov 26 08:21:56 crc kubenswrapper[4940]: I1126 08:21:56.628345 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mtkw9" event={"ID":"956c8cfa-a13e-47d9-aaae-dfd9c797985c","Type":"ContainerStarted","Data":"1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28"}
Nov 26 08:21:56 crc kubenswrapper[4940]: I1126 08:21:56.648758 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mtkw9" podStartSLOduration=2.236523683 podStartE2EDuration="3.64874319s" podCreationTimestamp="2025-11-26 08:21:53 +0000 UTC" firstStartedPulling="2025-11-26 08:21:54.607140592 +0000 UTC m=+5216.127282251" lastFinishedPulling="2025-11-26 08:21:56.019360099 +0000 UTC m=+5217.539501758" observedRunningTime="2025-11-26 08:21:56.646583022 +0000 UTC m=+5218.166724641" watchObservedRunningTime="2025-11-26 08:21:56.64874319 +0000 UTC m=+5218.168884809"
Nov 26 08:22:03 crc kubenswrapper[4940]: I1126 08:22:03.867547 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:22:03 crc kubenswrapper[4940]: I1126 08:22:03.868235 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:22:03 crc kubenswrapper[4940]: I1126 08:22:03.922577 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:22:04 crc kubenswrapper[4940]: I1126 08:22:04.165515 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:22:04 crc kubenswrapper[4940]: E1126 08:22:04.165980 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:22:04 crc kubenswrapper[4940]: I1126 08:22:04.743882 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:22:04 crc kubenswrapper[4940]: I1126 08:22:04.790736 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mtkw9"]
Nov 26 08:22:06 crc kubenswrapper[4940]: I1126 08:22:06.705220 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mtkw9" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerName="registry-server" containerID="cri-o://1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28" gracePeriod=2
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.141919 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.168622 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpww2\" (UniqueName: \"kubernetes.io/projected/956c8cfa-a13e-47d9-aaae-dfd9c797985c-kube-api-access-xpww2\") pod \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") "
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.168755 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-catalog-content\") pod \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") "
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.168898 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-utilities\") pod \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\" (UID: \"956c8cfa-a13e-47d9-aaae-dfd9c797985c\") "
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.171644 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-utilities" (OuterVolumeSpecName: "utilities") pod "956c8cfa-a13e-47d9-aaae-dfd9c797985c" (UID: "956c8cfa-a13e-47d9-aaae-dfd9c797985c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.178242 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/956c8cfa-a13e-47d9-aaae-dfd9c797985c-kube-api-access-xpww2" (OuterVolumeSpecName: "kube-api-access-xpww2") pod "956c8cfa-a13e-47d9-aaae-dfd9c797985c" (UID: "956c8cfa-a13e-47d9-aaae-dfd9c797985c"). InnerVolumeSpecName "kube-api-access-xpww2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.190110 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "956c8cfa-a13e-47d9-aaae-dfd9c797985c" (UID: "956c8cfa-a13e-47d9-aaae-dfd9c797985c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.270675 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.270708 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpww2\" (UniqueName: \"kubernetes.io/projected/956c8cfa-a13e-47d9-aaae-dfd9c797985c-kube-api-access-xpww2\") on node \"crc\" DevicePath \"\""
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.270717 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/956c8cfa-a13e-47d9-aaae-dfd9c797985c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.716069 4940 generic.go:334] "Generic (PLEG): container finished" podID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerID="1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28" exitCode=0
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.716202 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mtkw9"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.716195 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mtkw9" event={"ID":"956c8cfa-a13e-47d9-aaae-dfd9c797985c","Type":"ContainerDied","Data":"1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28"}
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.716540 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mtkw9" event={"ID":"956c8cfa-a13e-47d9-aaae-dfd9c797985c","Type":"ContainerDied","Data":"c85e7ef109922146a353d5a62a1619b8a9833bc02de70857c1b7773f54f9bec5"}
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.716560 4940 scope.go:117] "RemoveContainer" containerID="1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.744860 4940 scope.go:117] "RemoveContainer" containerID="0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.752202 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mtkw9"]
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.760540 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mtkw9"]
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.779160 4940 scope.go:117] "RemoveContainer" containerID="4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.792377 4940 scope.go:117] "RemoveContainer" containerID="1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28"
Nov 26 08:22:07 crc kubenswrapper[4940]: E1126 08:22:07.792753 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28\": container with ID starting with 1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28 not found: ID does not exist" containerID="1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.792783 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28"} err="failed to get container status \"1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28\": rpc error: code = NotFound desc = could not find container \"1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28\": container with ID starting with 1bbcd8a1962dca6a3be4a98a0ced236aee2f43a77ef7d57f905958d2593c7b28 not found: ID does not exist"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.792805 4940 scope.go:117] "RemoveContainer" containerID="0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395"
Nov 26 08:22:07 crc kubenswrapper[4940]: E1126 08:22:07.793058 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395\": container with ID starting with 0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395 not found: ID does not exist" containerID="0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.793077 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395"} err="failed to get container status \"0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395\": rpc error: code = NotFound desc = could not find container \"0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395\": container with ID starting with 0d55f78894a30e4fef707cebdbe4a95b2bfbc85a90a58a901c5197ef58fd4395 not found: ID does not exist"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.793102 4940 scope.go:117] "RemoveContainer" containerID="4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4"
Nov 26 08:22:07 crc kubenswrapper[4940]: E1126 08:22:07.793415 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4\": container with ID starting with 4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4 not found: ID does not exist" containerID="4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4"
Nov 26 08:22:07 crc kubenswrapper[4940]: I1126 08:22:07.793459 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4"} err="failed to get container status \"4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4\": rpc error: code = NotFound desc = could not find container \"4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4\": container with ID starting with 4b61cf5f609ea91afe1164ad479dbb1be096d8a3118775808b9434924c7b49a4 not found: ID does not exist"
Nov 26 08:22:09 crc kubenswrapper[4940]: I1126 08:22:09.176294 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" path="/var/lib/kubelet/pods/956c8cfa-a13e-47d9-aaae-dfd9c797985c/volumes"
Nov 26 08:22:15 crc kubenswrapper[4940]: I1126 08:22:15.165698 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:22:15 crc kubenswrapper[4940]: E1126 08:22:15.166615 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:22:29 crc kubenswrapper[4940]: I1126 08:22:29.171640 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:22:29 crc kubenswrapper[4940]: E1126 08:22:29.172711 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:22:41 crc kubenswrapper[4940]: I1126 08:22:41.166268 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:22:41 crc kubenswrapper[4940]: E1126 08:22:41.167137 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:22:53 crc kubenswrapper[4940]: I1126 08:22:53.166872 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:22:53 crc kubenswrapper[4940]: E1126 08:22:53.167941 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:23:07 crc kubenswrapper[4940]: I1126 08:23:07.165848 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:23:07 crc kubenswrapper[4940]: E1126 08:23:07.166864 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:23:20 crc kubenswrapper[4940]: I1126 08:23:20.165407 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:23:20 crc kubenswrapper[4940]: E1126 08:23:20.166137 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:23:31 crc kubenswrapper[4940]: I1126 08:23:31.165752 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:23:31 crc kubenswrapper[4940]: E1126 08:23:31.166548 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:23:43 crc kubenswrapper[4940]: I1126 08:23:43.165650 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:23:43 crc kubenswrapper[4940]: E1126 08:23:43.166696 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:23:56 crc kubenswrapper[4940]: I1126 08:23:56.166280 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:23:56 crc kubenswrapper[4940]: E1126 08:23:56.167405 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:24:09 crc kubenswrapper[4940]: I1126 08:24:09.168901 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:24:09 crc kubenswrapper[4940]: E1126 08:24:09.169666 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:24:22 crc kubenswrapper[4940]: I1126 08:24:22.165228 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:24:22 crc kubenswrapper[4940]: E1126 08:24:22.166408 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:24:37 crc kubenswrapper[4940]: I1126 08:24:37.165331 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:24:37 crc kubenswrapper[4940]: E1126 08:24:37.166148 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:24:51 crc kubenswrapper[4940]: I1126 08:24:51.167432 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:24:51 crc kubenswrapper[4940]: E1126 08:24:51.168457 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:25:03 crc kubenswrapper[4940]: I1126 08:25:03.165314 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:25:03 crc kubenswrapper[4940]: E1126 08:25:03.166440 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.270947 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4ts9l"]
Nov 26 08:25:10 crc kubenswrapper[4940]: E1126 08:25:10.272221 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerName="extract-utilities"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.272235 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerName="extract-utilities"
Nov 26 08:25:10 crc kubenswrapper[4940]: E1126 08:25:10.272273 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerName="registry-server"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.272279 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerName="registry-server"
Nov 26 08:25:10 crc kubenswrapper[4940]: E1126 08:25:10.272289 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerName="extract-content"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.272295 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerName="extract-content"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.272433 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="956c8cfa-a13e-47d9-aaae-dfd9c797985c" containerName="registry-server"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.273472 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.327535 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4ts9l"]
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.382401 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-catalog-content\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.382484 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8qcx\" (UniqueName: \"kubernetes.io/projected/03673141-3b6e-4c77-aea3-ed73b24f3d59-kube-api-access-l8qcx\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.382669 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-utilities\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.484330 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-catalog-content\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.484634 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8qcx\" (UniqueName: \"kubernetes.io/projected/03673141-3b6e-4c77-aea3-ed73b24f3d59-kube-api-access-l8qcx\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.484761 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-utilities\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.485595 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-catalog-content\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.485896 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-utilities\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.505465 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8qcx\" (UniqueName: \"kubernetes.io/projected/03673141-3b6e-4c77-aea3-ed73b24f3d59-kube-api-access-l8qcx\") pod \"community-operators-4ts9l\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") " pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:10 crc kubenswrapper[4940]: I1126 08:25:10.653257 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:11 crc kubenswrapper[4940]: I1126 08:25:11.153961 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4ts9l"]
Nov 26 08:25:11 crc kubenswrapper[4940]: W1126 08:25:11.164647 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03673141_3b6e_4c77_aea3_ed73b24f3d59.slice/crio-88b02f3302347090e1e6a74e24bb6f7634306f16a99b65c731357764169782e8 WatchSource:0}: Error finding container 88b02f3302347090e1e6a74e24bb6f7634306f16a99b65c731357764169782e8: Status 404 returned error can't find the container with id 88b02f3302347090e1e6a74e24bb6f7634306f16a99b65c731357764169782e8
Nov 26 08:25:11 crc kubenswrapper[4940]: I1126 08:25:11.370741 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4ts9l" event={"ID":"03673141-3b6e-4c77-aea3-ed73b24f3d59","Type":"ContainerStarted","Data":"88b02f3302347090e1e6a74e24bb6f7634306f16a99b65c731357764169782e8"}
Nov 26 08:25:12 crc kubenswrapper[4940]: I1126 08:25:12.383404 4940 generic.go:334] "Generic (PLEG): container finished" podID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerID="70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc" exitCode=0
Nov 26 08:25:12 crc kubenswrapper[4940]: I1126 08:25:12.383451 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4ts9l" event={"ID":"03673141-3b6e-4c77-aea3-ed73b24f3d59","Type":"ContainerDied","Data":"70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc"}
Nov 26 08:25:13 crc kubenswrapper[4940]: I1126 08:25:13.398964 4940 generic.go:334] "Generic (PLEG): container finished" podID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerID="a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f" exitCode=0
Nov 26 08:25:13 crc kubenswrapper[4940]: I1126 08:25:13.399026 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4ts9l" event={"ID":"03673141-3b6e-4c77-aea3-ed73b24f3d59","Type":"ContainerDied","Data":"a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f"}
Nov 26 08:25:14 crc kubenswrapper[4940]: I1126 08:25:14.417927 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4ts9l" event={"ID":"03673141-3b6e-4c77-aea3-ed73b24f3d59","Type":"ContainerStarted","Data":"370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209"}
Nov 26 08:25:14 crc kubenswrapper[4940]: I1126 08:25:14.455067 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4ts9l" podStartSLOduration=3.039926957 podStartE2EDuration="4.455016515s" podCreationTimestamp="2025-11-26 08:25:10 +0000 UTC" firstStartedPulling="2025-11-26 08:25:12.388122086 +0000 UTC m=+5413.908263745" lastFinishedPulling="2025-11-26 08:25:13.803211674 +0000 UTC m=+5415.323353303" observedRunningTime="2025-11-26 08:25:14.449297964 +0000 UTC m=+5415.969439593" watchObservedRunningTime="2025-11-26 08:25:14.455016515 +0000 UTC m=+5415.975158174"
Nov 26 08:25:17 crc kubenswrapper[4940]: I1126 08:25:17.166153 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:25:17 crc kubenswrapper[4940]: E1126 08:25:17.166814 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:25:20 crc kubenswrapper[4940]: I1126 08:25:20.653488 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:20 crc kubenswrapper[4940]: I1126 08:25:20.653909 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:20 crc kubenswrapper[4940]: I1126 08:25:20.704380 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:21 crc kubenswrapper[4940]: I1126 08:25:21.527739 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:21 crc kubenswrapper[4940]: I1126 08:25:21.579079 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4ts9l"]
Nov 26 08:25:23 crc kubenswrapper[4940]: I1126 08:25:23.494378 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4ts9l" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerName="registry-server" containerID="cri-o://370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209" gracePeriod=2
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.011297 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.111553 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-catalog-content\") pod \"03673141-3b6e-4c77-aea3-ed73b24f3d59\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") "
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.111634 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8qcx\" (UniqueName: \"kubernetes.io/projected/03673141-3b6e-4c77-aea3-ed73b24f3d59-kube-api-access-l8qcx\") pod \"03673141-3b6e-4c77-aea3-ed73b24f3d59\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") "
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.111718 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-utilities\") pod \"03673141-3b6e-4c77-aea3-ed73b24f3d59\" (UID: \"03673141-3b6e-4c77-aea3-ed73b24f3d59\") "
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.112699 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-utilities" (OuterVolumeSpecName: "utilities") pod "03673141-3b6e-4c77-aea3-ed73b24f3d59" (UID: "03673141-3b6e-4c77-aea3-ed73b24f3d59"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.117498 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03673141-3b6e-4c77-aea3-ed73b24f3d59-kube-api-access-l8qcx" (OuterVolumeSpecName: "kube-api-access-l8qcx") pod "03673141-3b6e-4c77-aea3-ed73b24f3d59" (UID: "03673141-3b6e-4c77-aea3-ed73b24f3d59"). InnerVolumeSpecName "kube-api-access-l8qcx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.163523 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "03673141-3b6e-4c77-aea3-ed73b24f3d59" (UID: "03673141-3b6e-4c77-aea3-ed73b24f3d59"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.213908 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8qcx\" (UniqueName: \"kubernetes.io/projected/03673141-3b6e-4c77-aea3-ed73b24f3d59-kube-api-access-l8qcx\") on node \"crc\" DevicePath \"\""
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.213939 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.213949 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03673141-3b6e-4c77-aea3-ed73b24f3d59-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.503061 4940 generic.go:334] "Generic (PLEG): container finished" podID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerID="370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209" exitCode=0
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.503101 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4ts9l" event={"ID":"03673141-3b6e-4c77-aea3-ed73b24f3d59","Type":"ContainerDied","Data":"370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209"}
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.503151 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4ts9l"
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.503175 4940 scope.go:117] "RemoveContainer" containerID="370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209"
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.503158 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4ts9l" event={"ID":"03673141-3b6e-4c77-aea3-ed73b24f3d59","Type":"ContainerDied","Data":"88b02f3302347090e1e6a74e24bb6f7634306f16a99b65c731357764169782e8"}
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.526410 4940 scope.go:117] "RemoveContainer" containerID="a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f"
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.535947 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4ts9l"]
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.542670 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4ts9l"]
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.575966 4940 scope.go:117] "RemoveContainer" containerID="70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc"
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.603021 4940 scope.go:117] "RemoveContainer" containerID="370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209"
Nov 26 08:25:24 crc kubenswrapper[4940]: E1126 08:25:24.603703 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209\": container with ID starting with 370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209 not found: ID does not exist" containerID="370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209"
Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.603795
4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209"} err="failed to get container status \"370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209\": rpc error: code = NotFound desc = could not find container \"370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209\": container with ID starting with 370d4d65ea088ba68a2cdc4d89fd4198abc68ef3b7ca6e496decb45c58caf209 not found: ID does not exist" Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.603833 4940 scope.go:117] "RemoveContainer" containerID="a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f" Nov 26 08:25:24 crc kubenswrapper[4940]: E1126 08:25:24.604361 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f\": container with ID starting with a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f not found: ID does not exist" containerID="a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f" Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.604446 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f"} err="failed to get container status \"a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f\": rpc error: code = NotFound desc = could not find container \"a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f\": container with ID starting with a350b84c914889e4162599af9d7d7f01e46a4652d22935be26231c65a1585a8f not found: ID does not exist" Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.604501 4940 scope.go:117] "RemoveContainer" containerID="70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc" Nov 26 08:25:24 crc kubenswrapper[4940]: E1126 08:25:24.605245 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc\": container with ID starting with 70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc not found: ID does not exist" containerID="70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc" Nov 26 08:25:24 crc kubenswrapper[4940]: I1126 08:25:24.605283 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc"} err="failed to get container status \"70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc\": rpc error: code = NotFound desc = could not find container \"70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc\": container with ID starting with 70438c4050bd3a8c074c53314935ac807d3fba448d3098579c2a0f901a956dcc not found: ID does not exist" Nov 26 08:25:25 crc kubenswrapper[4940]: I1126 08:25:25.177717 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" path="/var/lib/kubelet/pods/03673141-3b6e-4c77-aea3-ed73b24f3d59/volumes" Nov 26 08:25:32 crc kubenswrapper[4940]: I1126 08:25:32.165169 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" Nov 26 08:25:32 crc kubenswrapper[4940]: E1126 08:25:32.167215 4940 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:25:43 crc kubenswrapper[4940]: I1126 08:25:43.165998 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" Nov 26 08:25:43 crc kubenswrapper[4940]: E1126 08:25:43.166971 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:25:56 crc kubenswrapper[4940]: I1126 08:25:56.165251 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" Nov 26 08:25:56 crc kubenswrapper[4940]: E1126 08:25:56.166134 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:26:07 crc kubenswrapper[4940]: I1126 08:26:07.167078 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" Nov 26 08:26:07 crc kubenswrapper[4940]: E1126 08:26:07.168386 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:26:22 crc kubenswrapper[4940]: I1126 08:26:22.166451 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" Nov 26 08:26:22 crc kubenswrapper[4940]: E1126 08:26:22.167803 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:26:35 crc kubenswrapper[4940]: I1126 08:26:35.166029 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" Nov 26 08:26:35 crc kubenswrapper[4940]: E1126 08:26:35.167121 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:26:46 crc kubenswrapper[4940]: I1126 08:26:46.167303 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" Nov 26 08:26:46 crc kubenswrapper[4940]: E1126 08:26:46.168270 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:26:57 crc kubenswrapper[4940]: I1126 08:26:57.165028 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0" Nov 26 08:26:57 crc kubenswrapper[4940]: I1126 08:26:57.374874 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"cc9b88f131c13a12d01b7bae4e849661d4ac4d50a4ca0815449140f6bec70a5d"} Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.797494 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-xnnpc"] Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.804671 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-xnnpc"] Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.988873 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-dh4vd"] Nov 26 08:27:45 crc kubenswrapper[4940]: E1126 08:27:45.989486 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerName="registry-server" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.989563 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerName="registry-server" Nov 26 08:27:45 crc kubenswrapper[4940]: E1126 08:27:45.989638 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerName="extract-utilities" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.989691 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerName="extract-utilities" Nov 26 08:27:45 crc kubenswrapper[4940]: E1126 08:27:45.989763 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerName="extract-content" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.989824 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerName="extract-content" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.990028 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="03673141-3b6e-4c77-aea3-ed73b24f3d59" containerName="registry-server" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.990651 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.993806 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.993952 4940 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-hqrk2" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.994510 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.995232 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-dh4vd"] Nov 26 08:27:45 crc kubenswrapper[4940]: I1126 08:27:45.999208 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.108306 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmctv\" (UniqueName: \"kubernetes.io/projected/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-kube-api-access-vmctv\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.108707 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-node-mnt\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.108921 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-crc-storage\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.210550 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-crc-storage\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.210644 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmctv\" (UniqueName: \"kubernetes.io/projected/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-kube-api-access-vmctv\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.210700 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-node-mnt\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.211546 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-node-mnt\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " 
pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.212269 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-crc-storage\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.236646 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmctv\" (UniqueName: \"kubernetes.io/projected/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-kube-api-access-vmctv\") pod \"crc-storage-crc-dh4vd\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.325685 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.772093 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-dh4vd"] Nov 26 08:27:46 crc kubenswrapper[4940]: I1126 08:27:46.781634 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:27:47 crc kubenswrapper[4940]: I1126 08:27:47.177630 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e65a658-9d09-4b66-9e51-f2e6fd7bf84d" path="/var/lib/kubelet/pods/8e65a658-9d09-4b66-9e51-f2e6fd7bf84d/volumes" Nov 26 08:27:47 crc kubenswrapper[4940]: I1126 08:27:47.785622 4940 generic.go:334] "Generic (PLEG): container finished" podID="bf936f7c-ebe2-477a-9fe1-7fb5296d7733" containerID="0f930a64618fbc77bcbf6e2a205d16cc371a04f88c8879791cb093d72e8c48f8" exitCode=0 Nov 26 08:27:47 crc kubenswrapper[4940]: I1126 08:27:47.785702 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dh4vd" event={"ID":"bf936f7c-ebe2-477a-9fe1-7fb5296d7733","Type":"ContainerDied","Data":"0f930a64618fbc77bcbf6e2a205d16cc371a04f88c8879791cb093d72e8c48f8"} Nov 26 08:27:47 crc kubenswrapper[4940]: I1126 08:27:47.786516 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dh4vd" event={"ID":"bf936f7c-ebe2-477a-9fe1-7fb5296d7733","Type":"ContainerStarted","Data":"f4196ef5d05c9ab59365c31de6428f3e2fedaa6be0d473461f84045d5d14afa3"} Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.103596 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.161393 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmctv\" (UniqueName: \"kubernetes.io/projected/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-kube-api-access-vmctv\") pod \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.161449 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-crc-storage\") pod \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.161476 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-node-mnt\") pod \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\" (UID: \"bf936f7c-ebe2-477a-9fe1-7fb5296d7733\") " Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.161861 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "bf936f7c-ebe2-477a-9fe1-7fb5296d7733" (UID: "bf936f7c-ebe2-477a-9fe1-7fb5296d7733"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.166440 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-kube-api-access-vmctv" (OuterVolumeSpecName: "kube-api-access-vmctv") pod "bf936f7c-ebe2-477a-9fe1-7fb5296d7733" (UID: "bf936f7c-ebe2-477a-9fe1-7fb5296d7733"). InnerVolumeSpecName "kube-api-access-vmctv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.179778 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "bf936f7c-ebe2-477a-9fe1-7fb5296d7733" (UID: "bf936f7c-ebe2-477a-9fe1-7fb5296d7733"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.262801 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmctv\" (UniqueName: \"kubernetes.io/projected/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-kube-api-access-vmctv\") on node \"crc\" DevicePath \"\"" Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.262834 4940 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.262844 4940 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/bf936f7c-ebe2-477a-9fe1-7fb5296d7733-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.809192 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dh4vd" event={"ID":"bf936f7c-ebe2-477a-9fe1-7fb5296d7733","Type":"ContainerDied","Data":"f4196ef5d05c9ab59365c31de6428f3e2fedaa6be0d473461f84045d5d14afa3"} Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.809247 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4196ef5d05c9ab59365c31de6428f3e2fedaa6be0d473461f84045d5d14afa3" Nov 26 08:27:49 crc kubenswrapper[4940]: I1126 08:27:49.809258 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dh4vd" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.484925 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-dh4vd"] Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.491888 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-dh4vd"] Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.629710 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-84bwf"] Nov 26 08:27:51 crc kubenswrapper[4940]: E1126 08:27:51.630462 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf936f7c-ebe2-477a-9fe1-7fb5296d7733" containerName="storage" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.630513 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf936f7c-ebe2-477a-9fe1-7fb5296d7733" containerName="storage" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.630850 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf936f7c-ebe2-477a-9fe1-7fb5296d7733" containerName="storage" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.631710 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.635360 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.636232 4940 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-hqrk2" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.636974 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.639482 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.644763 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-84bwf"] Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.703162 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1a909f43-c961-4b19-9ecf-7f65be297932-node-mnt\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.703221 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5zg2\" (UniqueName: \"kubernetes.io/projected/1a909f43-c961-4b19-9ecf-7f65be297932-kube-api-access-b5zg2\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.703251 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1a909f43-c961-4b19-9ecf-7f65be297932-crc-storage\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.804560 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1a909f43-c961-4b19-9ecf-7f65be297932-node-mnt\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.804641 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5zg2\" (UniqueName: \"kubernetes.io/projected/1a909f43-c961-4b19-9ecf-7f65be297932-kube-api-access-b5zg2\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.804676 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1a909f43-c961-4b19-9ecf-7f65be297932-crc-storage\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.805024 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1a909f43-c961-4b19-9ecf-7f65be297932-node-mnt\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " 
pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.805817 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1a909f43-c961-4b19-9ecf-7f65be297932-crc-storage\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.831512 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5zg2\" (UniqueName: \"kubernetes.io/projected/1a909f43-c961-4b19-9ecf-7f65be297932-kube-api-access-b5zg2\") pod \"crc-storage-crc-84bwf\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:51 crc kubenswrapper[4940]: I1126 08:27:51.967172 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:52 crc kubenswrapper[4940]: I1126 08:27:52.412293 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-84bwf"] Nov 26 08:27:52 crc kubenswrapper[4940]: I1126 08:27:52.840809 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-84bwf" event={"ID":"1a909f43-c961-4b19-9ecf-7f65be297932","Type":"ContainerStarted","Data":"25229c8596a9bdc54dc53a5914ea5e139e110c21e7779c4f291474584f3c20bc"} Nov 26 08:27:53 crc kubenswrapper[4940]: I1126 08:27:53.177764 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf936f7c-ebe2-477a-9fe1-7fb5296d7733" path="/var/lib/kubelet/pods/bf936f7c-ebe2-477a-9fe1-7fb5296d7733/volumes" Nov 26 08:27:53 crc kubenswrapper[4940]: I1126 08:27:53.856965 4940 generic.go:334] "Generic (PLEG): container finished" podID="1a909f43-c961-4b19-9ecf-7f65be297932" containerID="f780d7dfcae11c253041f2bfd37668c0bfe79fe6d24c829bca2fec48d5c97d5b" exitCode=0 Nov 26 08:27:53 crc kubenswrapper[4940]: I1126 08:27:53.857107 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-84bwf" event={"ID":"1a909f43-c961-4b19-9ecf-7f65be297932","Type":"ContainerDied","Data":"f780d7dfcae11c253041f2bfd37668c0bfe79fe6d24c829bca2fec48d5c97d5b"} Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.150878 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.277654 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1a909f43-c961-4b19-9ecf-7f65be297932-node-mnt\") pod \"1a909f43-c961-4b19-9ecf-7f65be297932\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.277716 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1a909f43-c961-4b19-9ecf-7f65be297932-crc-storage\") pod \"1a909f43-c961-4b19-9ecf-7f65be297932\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.277763 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5zg2\" (UniqueName: \"kubernetes.io/projected/1a909f43-c961-4b19-9ecf-7f65be297932-kube-api-access-b5zg2\") pod \"1a909f43-c961-4b19-9ecf-7f65be297932\" (UID: \"1a909f43-c961-4b19-9ecf-7f65be297932\") " Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.277851 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a909f43-c961-4b19-9ecf-7f65be297932-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "1a909f43-c961-4b19-9ecf-7f65be297932" (UID: "1a909f43-c961-4b19-9ecf-7f65be297932"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.278202 4940 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1a909f43-c961-4b19-9ecf-7f65be297932-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.284361 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a909f43-c961-4b19-9ecf-7f65be297932-kube-api-access-b5zg2" (OuterVolumeSpecName: "kube-api-access-b5zg2") pod "1a909f43-c961-4b19-9ecf-7f65be297932" (UID: "1a909f43-c961-4b19-9ecf-7f65be297932"). InnerVolumeSpecName "kube-api-access-b5zg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.303900 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a909f43-c961-4b19-9ecf-7f65be297932-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "1a909f43-c961-4b19-9ecf-7f65be297932" (UID: "1a909f43-c961-4b19-9ecf-7f65be297932"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.379267 4940 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1a909f43-c961-4b19-9ecf-7f65be297932-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.379316 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5zg2\" (UniqueName: \"kubernetes.io/projected/1a909f43-c961-4b19-9ecf-7f65be297932-kube-api-access-b5zg2\") on node \"crc\" DevicePath \"\"" Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.880655 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-84bwf" event={"ID":"1a909f43-c961-4b19-9ecf-7f65be297932","Type":"ContainerDied","Data":"25229c8596a9bdc54dc53a5914ea5e139e110c21e7779c4f291474584f3c20bc"} Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.880705 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25229c8596a9bdc54dc53a5914ea5e139e110c21e7779c4f291474584f3c20bc" Nov 26 08:27:55 crc kubenswrapper[4940]: I1126 08:27:55.880722 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-84bwf" Nov 26 08:28:09 crc kubenswrapper[4940]: I1126 08:28:09.113554 4940 scope.go:117] "RemoveContainer" containerID="603e87a6f1a4546633c64ac4957fae9eba89b0435e784ab8317cce2415fc6b1c" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.161982 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wqpp4"] Nov 26 08:29:02 crc kubenswrapper[4940]: E1126 08:29:02.163145 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a909f43-c961-4b19-9ecf-7f65be297932" containerName="storage" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.163166 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a909f43-c961-4b19-9ecf-7f65be297932" containerName="storage" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.163421 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a909f43-c961-4b19-9ecf-7f65be297932" containerName="storage" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.165121 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.181806 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wqpp4"] Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.283273 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-catalog-content\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.283368 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mc2r\" (UniqueName: \"kubernetes.io/projected/327e86d8-0308-42e1-a903-5a506fddf823-kube-api-access-6mc2r\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.283422 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-utilities\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.385011 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-catalog-content\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.385102 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mc2r\" (UniqueName: \"kubernetes.io/projected/327e86d8-0308-42e1-a903-5a506fddf823-kube-api-access-6mc2r\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.385331 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-utilities\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.385840 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-catalog-content\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.386091 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-utilities\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.407011 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6mc2r\" (UniqueName: \"kubernetes.io/projected/327e86d8-0308-42e1-a903-5a506fddf823-kube-api-access-6mc2r\") pod \"redhat-operators-wqpp4\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.493698 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:02 crc kubenswrapper[4940]: I1126 08:29:02.910151 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wqpp4"] Nov 26 08:29:03 crc kubenswrapper[4940]: I1126 08:29:03.444831 4940 generic.go:334] "Generic (PLEG): container finished" podID="327e86d8-0308-42e1-a903-5a506fddf823" containerID="80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5" exitCode=0 Nov 26 08:29:03 crc kubenswrapper[4940]: I1126 08:29:03.444877 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqpp4" event={"ID":"327e86d8-0308-42e1-a903-5a506fddf823","Type":"ContainerDied","Data":"80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5"} Nov 26 08:29:03 crc kubenswrapper[4940]: I1126 08:29:03.445224 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqpp4" event={"ID":"327e86d8-0308-42e1-a903-5a506fddf823","Type":"ContainerStarted","Data":"88b64f6d29ce73feb1050d998299c0e5f695e673e094541ff9cb255b917bace4"} Nov 26 08:29:04 crc kubenswrapper[4940]: I1126 08:29:04.455550 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqpp4" event={"ID":"327e86d8-0308-42e1-a903-5a506fddf823","Type":"ContainerStarted","Data":"bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9"} Nov 26 08:29:05 crc kubenswrapper[4940]: I1126 08:29:05.464297 4940 generic.go:334] "Generic (PLEG): container finished" podID="327e86d8-0308-42e1-a903-5a506fddf823" containerID="bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9" exitCode=0 Nov 26 08:29:05 crc kubenswrapper[4940]: I1126 08:29:05.464354 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqpp4" event={"ID":"327e86d8-0308-42e1-a903-5a506fddf823","Type":"ContainerDied","Data":"bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9"} Nov 26 08:29:06 crc kubenswrapper[4940]: I1126 08:29:06.474243 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqpp4" event={"ID":"327e86d8-0308-42e1-a903-5a506fddf823","Type":"ContainerStarted","Data":"f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be"} Nov 26 08:29:06 crc kubenswrapper[4940]: I1126 08:29:06.493056 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wqpp4" podStartSLOduration=1.947521705 podStartE2EDuration="4.49301721s" podCreationTimestamp="2025-11-26 08:29:02 +0000 UTC" firstStartedPulling="2025-11-26 08:29:03.446222965 +0000 UTC m=+5644.966364584" lastFinishedPulling="2025-11-26 08:29:05.99171845 +0000 UTC m=+5647.511860089" observedRunningTime="2025-11-26 08:29:06.488679252 +0000 UTC m=+5648.008820871" watchObservedRunningTime="2025-11-26 08:29:06.49301721 +0000 UTC m=+5648.013158829" Nov 26 08:29:12 crc kubenswrapper[4940]: I1126 08:29:12.493931 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 
08:29:12 crc kubenswrapper[4940]: I1126 08:29:12.494381 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:12 crc kubenswrapper[4940]: I1126 08:29:12.540114 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:12 crc kubenswrapper[4940]: I1126 08:29:12.596338 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:12 crc kubenswrapper[4940]: I1126 08:29:12.780966 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wqpp4"] Nov 26 08:29:14 crc kubenswrapper[4940]: I1126 08:29:14.536272 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wqpp4" podUID="327e86d8-0308-42e1-a903-5a506fddf823" containerName="registry-server" containerID="cri-o://f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be" gracePeriod=2 Nov 26 08:29:14 crc kubenswrapper[4940]: I1126 08:29:14.935922 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.056789 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-utilities\") pod \"327e86d8-0308-42e1-a903-5a506fddf823\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.056909 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mc2r\" (UniqueName: \"kubernetes.io/projected/327e86d8-0308-42e1-a903-5a506fddf823-kube-api-access-6mc2r\") pod \"327e86d8-0308-42e1-a903-5a506fddf823\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.056972 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-catalog-content\") pod \"327e86d8-0308-42e1-a903-5a506fddf823\" (UID: \"327e86d8-0308-42e1-a903-5a506fddf823\") " Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.057824 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-utilities" (OuterVolumeSpecName: "utilities") pod "327e86d8-0308-42e1-a903-5a506fddf823" (UID: "327e86d8-0308-42e1-a903-5a506fddf823"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.063610 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/327e86d8-0308-42e1-a903-5a506fddf823-kube-api-access-6mc2r" (OuterVolumeSpecName: "kube-api-access-6mc2r") pod "327e86d8-0308-42e1-a903-5a506fddf823" (UID: "327e86d8-0308-42e1-a903-5a506fddf823"). InnerVolumeSpecName "kube-api-access-6mc2r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.158475 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.158522 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mc2r\" (UniqueName: \"kubernetes.io/projected/327e86d8-0308-42e1-a903-5a506fddf823-kube-api-access-6mc2r\") on node \"crc\" DevicePath \"\"" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.546408 4940 generic.go:334] "Generic (PLEG): container finished" podID="327e86d8-0308-42e1-a903-5a506fddf823" containerID="f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be" exitCode=0 Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.546461 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqpp4" event={"ID":"327e86d8-0308-42e1-a903-5a506fddf823","Type":"ContainerDied","Data":"f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be"} Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.546496 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wqpp4" event={"ID":"327e86d8-0308-42e1-a903-5a506fddf823","Type":"ContainerDied","Data":"88b64f6d29ce73feb1050d998299c0e5f695e673e094541ff9cb255b917bace4"} Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.546495 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wqpp4" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.546552 4940 scope.go:117] "RemoveContainer" containerID="f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.567335 4940 scope.go:117] "RemoveContainer" containerID="bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.574325 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "327e86d8-0308-42e1-a903-5a506fddf823" (UID: "327e86d8-0308-42e1-a903-5a506fddf823"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.586791 4940 scope.go:117] "RemoveContainer" containerID="80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.611684 4940 scope.go:117] "RemoveContainer" containerID="f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be" Nov 26 08:29:15 crc kubenswrapper[4940]: E1126 08:29:15.612173 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be\": container with ID starting with f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be not found: ID does not exist" containerID="f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.612224 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be"} err="failed to get container status \"f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be\": rpc error: code = NotFound desc = could not find container \"f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be\": container with ID starting with f421489ebb5d228f85d587e24eda2f96ee6fa2c97980b5d0722b7b791b13b4be not found: ID does not exist" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.612259 4940 scope.go:117] "RemoveContainer" containerID="bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9" Nov 26 08:29:15 crc kubenswrapper[4940]: E1126 08:29:15.612607 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9\": container with ID starting with bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9 not found: ID does not exist" containerID="bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.612723 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9"} err="failed to get container status \"bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9\": rpc error: code = NotFound desc = could not find container \"bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9\": container with ID starting with bacfbb3ff33cff67f21dc8febff37f53c535225299f8531acc8be34478c499f9 not found: ID does not exist" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.612823 4940 scope.go:117] "RemoveContainer" containerID="80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5" Nov 26 08:29:15 crc kubenswrapper[4940]: E1126 08:29:15.613306 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5\": container with ID starting with 80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5 not found: ID does not exist" containerID="80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.613349 4940 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5"} err="failed to get container status \"80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5\": rpc error: code = NotFound desc = could not find container \"80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5\": container with ID starting with 80c13176001ed836c6daaf010aacec8332905b11a6cbbd8a58bc19d6aa1464c5 not found: ID does not exist" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.664611 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/327e86d8-0308-42e1-a903-5a506fddf823-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.888556 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wqpp4"] Nov 26 08:29:15 crc kubenswrapper[4940]: I1126 08:29:15.897480 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wqpp4"] Nov 26 08:29:17 crc kubenswrapper[4940]: I1126 08:29:17.181409 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="327e86d8-0308-42e1-a903-5a506fddf823" path="/var/lib/kubelet/pods/327e86d8-0308-42e1-a903-5a506fddf823/volumes" Nov 26 08:29:21 crc kubenswrapper[4940]: I1126 08:29:21.728886 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:29:21 crc kubenswrapper[4940]: I1126 08:29:21.729637 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:29:51 crc kubenswrapper[4940]: I1126 08:29:51.728496 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:29:51 crc kubenswrapper[4940]: I1126 08:29:51.728990 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.891767 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85458b99fc-qbqch"] Nov 26 08:29:57 crc kubenswrapper[4940]: E1126 08:29:57.895918 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="327e86d8-0308-42e1-a903-5a506fddf823" containerName="extract-utilities" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.895944 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="327e86d8-0308-42e1-a903-5a506fddf823" containerName="extract-utilities" Nov 26 08:29:57 crc kubenswrapper[4940]: E1126 08:29:57.895957 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="327e86d8-0308-42e1-a903-5a506fddf823" 
containerName="registry-server" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.895964 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="327e86d8-0308-42e1-a903-5a506fddf823" containerName="registry-server" Nov 26 08:29:57 crc kubenswrapper[4940]: E1126 08:29:57.895986 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="327e86d8-0308-42e1-a903-5a506fddf823" containerName="extract-content" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.895993 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="327e86d8-0308-42e1-a903-5a506fddf823" containerName="extract-content" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.896197 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="327e86d8-0308-42e1-a903-5a506fddf823" containerName="registry-server" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.897120 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.899937 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.899991 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.900485 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.900613 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-w7crk" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.908879 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85458b99fc-qbqch"] Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.940569 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-997dbfdb7-kzkhd"] Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.941861 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.944633 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 08:29:57 crc kubenswrapper[4940]: I1126 08:29:57.964257 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-997dbfdb7-kzkhd"] Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.033054 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f29586f6-dfd2-4db1-a55e-b9919913eac4-config\") pod \"dnsmasq-dns-85458b99fc-qbqch\" (UID: \"f29586f6-dfd2-4db1-a55e-b9919913eac4\") " pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.033179 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-944c9\" (UniqueName: \"kubernetes.io/projected/f29586f6-dfd2-4db1-a55e-b9919913eac4-kube-api-access-944c9\") pod \"dnsmasq-dns-85458b99fc-qbqch\" (UID: \"f29586f6-dfd2-4db1-a55e-b9919913eac4\") " pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.134765 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-dns-svc\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.134844 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f29586f6-dfd2-4db1-a55e-b9919913eac4-config\") pod \"dnsmasq-dns-85458b99fc-qbqch\" (UID: \"f29586f6-dfd2-4db1-a55e-b9919913eac4\") " pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.134878 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-config\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.134916 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-944c9\" (UniqueName: \"kubernetes.io/projected/f29586f6-dfd2-4db1-a55e-b9919913eac4-kube-api-access-944c9\") pod \"dnsmasq-dns-85458b99fc-qbqch\" (UID: \"f29586f6-dfd2-4db1-a55e-b9919913eac4\") " pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.135024 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ntvf\" (UniqueName: \"kubernetes.io/projected/f87a3541-0420-4a4e-9019-720ef08d6fd8-kube-api-access-5ntvf\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.135881 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f29586f6-dfd2-4db1-a55e-b9919913eac4-config\") pod \"dnsmasq-dns-85458b99fc-qbqch\" (UID: \"f29586f6-dfd2-4db1-a55e-b9919913eac4\") " pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 
08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.165099 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-944c9\" (UniqueName: \"kubernetes.io/projected/f29586f6-dfd2-4db1-a55e-b9919913eac4-kube-api-access-944c9\") pod \"dnsmasq-dns-85458b99fc-qbqch\" (UID: \"f29586f6-dfd2-4db1-a55e-b9919913eac4\") " pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.184249 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85458b99fc-qbqch"] Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.184707 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.242779 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ntvf\" (UniqueName: \"kubernetes.io/projected/f87a3541-0420-4a4e-9019-720ef08d6fd8-kube-api-access-5ntvf\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.242854 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-dns-svc\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.242889 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-config\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.244024 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-config\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.244539 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-dns-svc\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.252212 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79d6ff5cc5-98lkk"] Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.253651 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.288589 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79d6ff5cc5-98lkk"] Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.289919 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ntvf\" (UniqueName: \"kubernetes.io/projected/f87a3541-0420-4a4e-9019-720ef08d6fd8-kube-api-access-5ntvf\") pod \"dnsmasq-dns-997dbfdb7-kzkhd\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.445935 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-config\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.446002 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-dns-svc\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.446099 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spnpf\" (UniqueName: \"kubernetes.io/projected/ac982260-2b74-4742-9257-b82283d5a557-kube-api-access-spnpf\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.548670 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-config\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.548734 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-dns-svc\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.548781 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spnpf\" (UniqueName: \"kubernetes.io/projected/ac982260-2b74-4742-9257-b82283d5a557-kube-api-access-spnpf\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.549243 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-997dbfdb7-kzkhd"] Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.549776 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-dns-svc\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc 
kubenswrapper[4940]: I1126 08:29:58.549786 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.551066 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-config\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.572924 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spnpf\" (UniqueName: \"kubernetes.io/projected/ac982260-2b74-4742-9257-b82283d5a557-kube-api-access-spnpf\") pod \"dnsmasq-dns-79d6ff5cc5-98lkk\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") " pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.580208 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6849754c6c-pvdtk"] Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.581345 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.598180 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6849754c6c-pvdtk"] Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.618496 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.751718 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-dns-svc\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.752031 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68zl8\" (UniqueName: \"kubernetes.io/projected/35c9d53f-c724-40f1-9af3-d1cb1f549849-kube-api-access-68zl8\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.752190 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-config\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.816799 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85458b99fc-qbqch"] Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.853433 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-config\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.853506 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-dns-svc\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.853591 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68zl8\" (UniqueName: \"kubernetes.io/projected/35c9d53f-c724-40f1-9af3-d1cb1f549849-kube-api-access-68zl8\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.854929 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-config\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.854955 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-dns-svc\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.872158 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68zl8\" (UniqueName: \"kubernetes.io/projected/35c9d53f-c724-40f1-9af3-d1cb1f549849-kube-api-access-68zl8\") pod \"dnsmasq-dns-6849754c6c-pvdtk\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.884860 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85458b99fc-qbqch" event={"ID":"f29586f6-dfd2-4db1-a55e-b9919913eac4","Type":"ContainerStarted","Data":"8c58389479e0c9ce1bcd23aedbff0b4aec1b5fcda78d2dcca8053667ad161bab"} Nov 26 08:29:58 crc kubenswrapper[4940]: I1126 08:29:58.927949 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.131395 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-997dbfdb7-kzkhd"] Nov 26 08:29:59 crc kubenswrapper[4940]: W1126 08:29:59.134187 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf87a3541_0420_4a4e_9019_720ef08d6fd8.slice/crio-60daf56d5cb162e5167c13c2d466ca1da8fbf53dba7d040094f42f668c459826 WatchSource:0}: Error finding container 60daf56d5cb162e5167c13c2d466ca1da8fbf53dba7d040094f42f668c459826: Status 404 returned error can't find the container with id 60daf56d5cb162e5167c13c2d466ca1da8fbf53dba7d040094f42f668c459826 Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.221917 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79d6ff5cc5-98lkk"] Nov 26 08:29:59 crc kubenswrapper[4940]: W1126 08:29:59.227924 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac982260_2b74_4742_9257_b82283d5a557.slice/crio-56860c5cd895a0bd6b7994443e22543f7899f4930461798d0fbfc414e4f29625 WatchSource:0}: Error finding container 56860c5cd895a0bd6b7994443e22543f7899f4930461798d0fbfc414e4f29625: Status 404 returned error can't find the container with id 56860c5cd895a0bd6b7994443e22543f7899f4930461798d0fbfc414e4f29625 Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.369610 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6849754c6c-pvdtk"] Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.423168 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.424844 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.426739 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.427392 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.428061 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wh9lb" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.428210 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.428360 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.458489 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.567846 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.567925 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.567955 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.568006 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.568029 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.568117 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.568159 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.568181 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cdt7\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-kube-api-access-9cdt7\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.568215 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669080 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669130 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669165 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669183 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669220 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669237 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669273 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669300 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.669321 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cdt7\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-kube-api-access-9cdt7\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.672370 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.672892 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.675584 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.675943 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0" Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.676209 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.676351 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a130cb460f4e43bd19fda2daf527cebbfd1afde2c00b1f7cb08238b8ce0d844b/globalmount\"" pod="openstack/rabbitmq-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.677602 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.671667 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.690353 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.691677 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cdt7\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-kube-api-access-9cdt7\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.718155 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.721069 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.723516 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-zp4dl"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.724374 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.724543 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.724729 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.724910 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.738108 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"rabbitmq-server-0\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.747468 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.763686 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.872276 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.872785 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9chq\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-kube-api-access-v9chq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.872823 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5e6166c4-8b55-4937-ad33-b83bae8213d1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.872847 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5e6166c4-8b55-4937-ad33-b83bae8213d1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.872870 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.872888 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.873122 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.873192 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.873215 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.899599 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" event={"ID":"f87a3541-0420-4a4e-9019-720ef08d6fd8","Type":"ContainerStarted","Data":"60daf56d5cb162e5167c13c2d466ca1da8fbf53dba7d040094f42f668c459826"}
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.900921 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" event={"ID":"ac982260-2b74-4742-9257-b82283d5a557","Type":"ContainerStarted","Data":"56860c5cd895a0bd6b7994443e22543f7899f4930461798d0fbfc414e4f29625"}
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.905755 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" event={"ID":"35c9d53f-c724-40f1-9af3-d1cb1f549849","Type":"ContainerStarted","Data":"eb0c20c00f404936ca2ad9aa0c38f0f4ebe9dc0b1fc56c5eabe15a0df72f316e"}
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.974878 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.974925 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9chq\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-kube-api-access-v9chq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.975532 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5e6166c4-8b55-4937-ad33-b83bae8213d1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.975560 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5e6166c4-8b55-4937-ad33-b83bae8213d1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.975585 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.975601 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.975885 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.975951 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.975982 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.976071 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.976544 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.976857 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.977758 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.982025 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.982198 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0fcd7e7869c9fa2f5307e2c7a2c3dbb3b1371b022ffbf50bc645221fc2bfa67d/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.982461 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.982932 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5e6166c4-8b55-4937-ad33-b83bae8213d1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.991081 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5e6166c4-8b55-4937-ad33-b83bae8213d1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:29:59 crc kubenswrapper[4940]: I1126 08:29:59.994602 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9chq\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-kube-api-access-v9chq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.039223 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"rabbitmq-cell1-server-0\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.063898 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.162351 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"]
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.165509 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.167947 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"]
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.168776 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.169701 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.282323 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-secret-volume\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.282430 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4lf2\" (UniqueName: \"kubernetes.io/projected/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-kube-api-access-c4lf2\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.283616 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-config-volume\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.292722 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 26 08:30:00 crc kubenswrapper[4940]: W1126 08:30:00.339022 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0bd0bd0_8c2a_4913_b47e_b3efba90bc21.slice/crio-716f21081aa9c7f089b23ba8a7617fed38de1a0c60a785f5440db4a9670d5c01 WatchSource:0}: Error finding container 716f21081aa9c7f089b23ba8a7617fed38de1a0c60a785f5440db4a9670d5c01: Status 404 returned error can't find the container with id 716f21081aa9c7f089b23ba8a7617fed38de1a0c60a785f5440db4a9670d5c01
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.385660 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-secret-volume\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.385729 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4lf2\" (UniqueName: \"kubernetes.io/projected/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-kube-api-access-c4lf2\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.386253 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-config-volume\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.387280 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-config-volume\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.408371 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-secret-volume\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.410925 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4lf2\" (UniqueName: \"kubernetes.io/projected/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-kube-api-access-c4lf2\") pod \"collect-profiles-29402430-nqtqn\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.551006 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.572635 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 26 08:30:00 crc kubenswrapper[4940]: W1126 08:30:00.584996 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e6166c4_8b55_4937_ad33_b83bae8213d1.slice/crio-915a91974426f03b74fe34a588cc28d59752fc462deb33eff4d1fd2778360913 WatchSource:0}: Error finding container 915a91974426f03b74fe34a588cc28d59752fc462deb33eff4d1fd2778360913: Status 404 returned error can't find the container with id 915a91974426f03b74fe34a588cc28d59752fc462deb33eff4d1fd2778360913
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.625026 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.626711 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.633141 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.636626 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.641971 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-dwbqs"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.643269 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.660355 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.665138 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.689905 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2k52\" (UniqueName: \"kubernetes.io/projected/c03b9b4d-9923-4534-94d4-00e6eee88f27-kube-api-access-g2k52\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.690002 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-config-data-default\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.690062 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-19c4ab62-425e-45f1-a943-9d55ccf0b2ba\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-19c4ab62-425e-45f1-a943-9d55ccf0b2ba\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.690091 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-kolla-config\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.690108 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c03b9b4d-9923-4534-94d4-00e6eee88f27-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.690121 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c03b9b4d-9923-4534-94d4-00e6eee88f27-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.690369 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.690403 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c03b9b4d-9923-4534-94d4-00e6eee88f27-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.794522 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c03b9b4d-9923-4534-94d4-00e6eee88f27-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.794653 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2k52\" (UniqueName: \"kubernetes.io/projected/c03b9b4d-9923-4534-94d4-00e6eee88f27-kube-api-access-g2k52\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.794799 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-config-data-default\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.794918 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-19c4ab62-425e-45f1-a943-9d55ccf0b2ba\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-19c4ab62-425e-45f1-a943-9d55ccf0b2ba\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.794984 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-kolla-config\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.795015 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c03b9b4d-9923-4534-94d4-00e6eee88f27-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.795058 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c03b9b4d-9923-4534-94d4-00e6eee88f27-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.795109 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.799299 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.800409 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c03b9b4d-9923-4534-94d4-00e6eee88f27-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.800823 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-kolla-config\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.801173 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c03b9b4d-9923-4534-94d4-00e6eee88f27-config-data-default\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.807350 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.807394 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-19c4ab62-425e-45f1-a943-9d55ccf0b2ba\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-19c4ab62-425e-45f1-a943-9d55ccf0b2ba\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/96c651eb54cb17ce3825cc37e39fd2132767293b63d8b7dd8c0cb4b9808c28fd/globalmount\"" pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.812643 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c03b9b4d-9923-4534-94d4-00e6eee88f27-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.819613 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c03b9b4d-9923-4534-94d4-00e6eee88f27-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.839587 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2k52\" (UniqueName: \"kubernetes.io/projected/c03b9b4d-9923-4534-94d4-00e6eee88f27-kube-api-access-g2k52\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.867976 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-19c4ab62-425e-45f1-a943-9d55ccf0b2ba\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-19c4ab62-425e-45f1-a943-9d55ccf0b2ba\") pod \"openstack-galera-0\" (UID: \"c03b9b4d-9923-4534-94d4-00e6eee88f27\") " pod="openstack/openstack-galera-0"
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.918432 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5e6166c4-8b55-4937-ad33-b83bae8213d1","Type":"ContainerStarted","Data":"915a91974426f03b74fe34a588cc28d59752fc462deb33eff4d1fd2778360913"}
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.919645 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21","Type":"ContainerStarted","Data":"716f21081aa9c7f089b23ba8a7617fed38de1a0c60a785f5440db4a9670d5c01"}
Nov 26 08:30:00 crc kubenswrapper[4940]: I1126 08:30:00.966472 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.065392 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"]
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.098455 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.100737 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.103437 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-bkxsk"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.103492 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.112745 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 26 08:30:01 crc kubenswrapper[4940]: W1126 08:30:01.135848 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a8e4af9_e7ec_4358_8db3_fd78c2adcfa5.slice/crio-48a94f5d3d5b9f5600614e9d4ae1357801de26faca6a42abc283dbbc3bdf9234 WatchSource:0}: Error finding container 48a94f5d3d5b9f5600614e9d4ae1357801de26faca6a42abc283dbbc3bdf9234: Status 404 returned error can't find the container with id 48a94f5d3d5b9f5600614e9d4ae1357801de26faca6a42abc283dbbc3bdf9234
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.203630 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/59fdd492-69d6-4325-9c7f-ed4622d6797b-config-data\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.203724 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/59fdd492-69d6-4325-9c7f-ed4622d6797b-kolla-config\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.203792 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6tzc\" (UniqueName: \"kubernetes.io/projected/59fdd492-69d6-4325-9c7f-ed4622d6797b-kube-api-access-p6tzc\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.306332 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/59fdd492-69d6-4325-9c7f-ed4622d6797b-config-data\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.306486 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/59fdd492-69d6-4325-9c7f-ed4622d6797b-kolla-config\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.306632 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6tzc\" (UniqueName: \"kubernetes.io/projected/59fdd492-69d6-4325-9c7f-ed4622d6797b-kube-api-access-p6tzc\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.307790 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/59fdd492-69d6-4325-9c7f-ed4622d6797b-kolla-config\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.307851 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/59fdd492-69d6-4325-9c7f-ed4622d6797b-config-data\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.349087 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6tzc\" (UniqueName: \"kubernetes.io/projected/59fdd492-69d6-4325-9c7f-ed4622d6797b-kube-api-access-p6tzc\") pod \"memcached-0\" (UID: \"59fdd492-69d6-4325-9c7f-ed4622d6797b\") " pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.430759 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.691047 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.931050 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.932374 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c03b9b4d-9923-4534-94d4-00e6eee88f27","Type":"ContainerStarted","Data":"b9e4a1877d034503fce137a1b4f0da7b1dc92a576bdca2cee461e4f3afd1421b"}
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.935081 4940 generic.go:334] "Generic (PLEG): container finished" podID="5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5" containerID="23349b5545f475e792bb33216eb291bceec31a8447eb0809988dcb19cf903e93" exitCode=0
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.935123 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn" event={"ID":"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5","Type":"ContainerDied","Data":"23349b5545f475e792bb33216eb291bceec31a8447eb0809988dcb19cf903e93"}
Nov 26 08:30:01 crc kubenswrapper[4940]: I1126 08:30:01.935144 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn" event={"ID":"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5","Type":"ContainerStarted","Data":"48a94f5d3d5b9f5600614e9d4ae1357801de26faca6a42abc283dbbc3bdf9234"}
Nov 26 08:30:01 crc kubenswrapper[4940]: W1126 08:30:01.940356 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59fdd492_69d6_4325_9c7f_ed4622d6797b.slice/crio-0fb2b1bf1918c423cd42730e5ef1a36fa00e18fe67410655f4de24a665287fb5 WatchSource:0}: Error finding container 0fb2b1bf1918c423cd42730e5ef1a36fa00e18fe67410655f4de24a665287fb5: Status 404 returned error can't find the container with id 0fb2b1bf1918c423cd42730e5ef1a36fa00e18fe67410655f4de24a665287fb5
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.030972 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.032467 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.039511 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.040049 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.040049 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.040865 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-j2n42"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.047442 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.120713 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/262e1270-2fbd-472b-bbd0-680f16ee060f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.120767 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.120979 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/262e1270-2fbd-472b-bbd0-680f16ee060f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.121050 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.121089 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.121161 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/262e1270-2fbd-472b-bbd0-680f16ee060f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.121343 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98n7b\" (UniqueName: \"kubernetes.io/projected/262e1270-2fbd-472b-bbd0-680f16ee060f-kube-api-access-98n7b\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.121373 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6796da0a-ee8d-4dc1-a6a6-5d4e1dc46c14\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6796da0a-ee8d-4dc1-a6a6-5d4e1dc46c14\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.223287 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/262e1270-2fbd-472b-bbd0-680f16ee060f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.223358 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.223382 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.223414 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/262e1270-2fbd-472b-bbd0-680f16ee060f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.223485 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98n7b\" (UniqueName: \"kubernetes.io/projected/262e1270-2fbd-472b-bbd0-680f16ee060f-kube-api-access-98n7b\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.223503 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6796da0a-ee8d-4dc1-a6a6-5d4e1dc46c14\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6796da0a-ee8d-4dc1-a6a6-5d4e1dc46c14\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.223528 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/262e1270-2fbd-472b-bbd0-680f16ee060f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.223552 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.224451 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.224729 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/262e1270-2fbd-472b-bbd0-680f16ee060f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.225353 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.229059 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/262e1270-2fbd-472b-bbd0-680f16ee060f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.229073 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.229347 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6796da0a-ee8d-4dc1-a6a6-5d4e1dc46c14\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6796da0a-ee8d-4dc1-a6a6-5d4e1dc46c14\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7b0941fe13b679999fe08c9ef38f8c8ebfbac6b8a6b24e6c8bf040b0b2a64078/globalmount\"" pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.231348 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/262e1270-2fbd-472b-bbd0-680f16ee060f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.232797 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/262e1270-2fbd-472b-bbd0-680f16ee060f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.242455 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98n7b\" (UniqueName: \"kubernetes.io/projected/262e1270-2fbd-472b-bbd0-680f16ee060f-kube-api-access-98n7b\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.272524 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6796da0a-ee8d-4dc1-a6a6-5d4e1dc46c14\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6796da0a-ee8d-4dc1-a6a6-5d4e1dc46c14\") pod \"openstack-cell1-galera-0\" (UID: \"262e1270-2fbd-472b-bbd0-680f16ee060f\") " pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.353288 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.834102 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 26 08:30:02 crc kubenswrapper[4940]: W1126 08:30:02.850516 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod262e1270_2fbd_472b_bbd0_680f16ee060f.slice/crio-356a1b7f21b215815b30a64ce43eecb02aeb4b70641ec2b5410d44341f2e26ab WatchSource:0}: Error finding container 356a1b7f21b215815b30a64ce43eecb02aeb4b70641ec2b5410d44341f2e26ab: Status 404 returned error can't find the container with id 356a1b7f21b215815b30a64ce43eecb02aeb4b70641ec2b5410d44341f2e26ab
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.943105 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"262e1270-2fbd-472b-bbd0-680f16ee060f","Type":"ContainerStarted","Data":"356a1b7f21b215815b30a64ce43eecb02aeb4b70641ec2b5410d44341f2e26ab"}
Nov 26 08:30:02 crc kubenswrapper[4940]: I1126 08:30:02.946555 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"59fdd492-69d6-4325-9c7f-ed4622d6797b","Type":"ContainerStarted","Data":"0fb2b1bf1918c423cd42730e5ef1a36fa00e18fe67410655f4de24a665287fb5"}
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.279955 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.341591 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-config-volume\") pod \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") "
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.341703 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-secret-volume\") pod \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") "
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.341779 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4lf2\" (UniqueName: \"kubernetes.io/projected/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-kube-api-access-c4lf2\") pod \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\" (UID: \"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5\") "
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.343939 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-config-volume" (OuterVolumeSpecName: "config-volume") pod "5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5" (UID: "5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.347661 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5" (UID: "5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.349460 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-kube-api-access-c4lf2" (OuterVolumeSpecName: "kube-api-access-c4lf2") pod "5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5" (UID: "5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5"). InnerVolumeSpecName "kube-api-access-c4lf2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.443480 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.443510 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.443522 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4lf2\" (UniqueName: \"kubernetes.io/projected/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5-kube-api-access-c4lf2\") on node \"crc\" DevicePath \"\""
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.957761 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn" event={"ID":"5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5","Type":"ContainerDied","Data":"48a94f5d3d5b9f5600614e9d4ae1357801de26faca6a42abc283dbbc3bdf9234"}
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.958017 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48a94f5d3d5b9f5600614e9d4ae1357801de26faca6a42abc283dbbc3bdf9234"
Nov 26 08:30:03 crc kubenswrapper[4940]: I1126 08:30:03.957816 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"
Nov 26 08:30:04 crc kubenswrapper[4940]: I1126 08:30:04.367500 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f"]
Nov 26 08:30:04 crc kubenswrapper[4940]: I1126 08:30:04.382438 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-qwx5f"]
Nov 26 08:30:05 crc kubenswrapper[4940]: I1126 08:30:05.178455 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0d6b053-b0ff-42ea-9676-0b59f03b6d9e" path="/var/lib/kubelet/pods/b0d6b053-b0ff-42ea-9676-0b59f03b6d9e/volumes"
Nov 26 08:30:09 crc kubenswrapper[4940]: I1126 08:30:09.196316 4940 scope.go:117] "RemoveContainer" containerID="e95dc71a819327999082589106061f0d70d55e281b4dfb73f15c7786b0cd0314"
Nov 26 08:30:21 crc kubenswrapper[4940]: I1126 08:30:21.728250 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:30:21 crc kubenswrapper[4940]: I1126 08:30:21.728699 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:30:21 crc kubenswrapper[4940]: I1126 08:30:21.728742 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 08:30:21 crc kubenswrapper[4940]: I1126 08:30:21.729333 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cc9b88f131c13a12d01b7bae4e849661d4ac4d50a4ca0815449140f6bec70a5d"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 08:30:21 crc kubenswrapper[4940]: I1126 08:30:21.729381 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://cc9b88f131c13a12d01b7bae4e849661d4ac4d50a4ca0815449140f6bec70a5d" gracePeriod=600
Nov 26 08:30:22 crc kubenswrapper[4940]: I1126 08:30:22.123204 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="cc9b88f131c13a12d01b7bae4e849661d4ac4d50a4ca0815449140f6bec70a5d" exitCode=0
Nov 26 08:30:22 crc kubenswrapper[4940]: I1126 08:30:22.123291 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"cc9b88f131c13a12d01b7bae4e849661d4ac4d50a4ca0815449140f6bec70a5d"}
Nov 26 08:30:22 crc kubenswrapper[4940]: I1126 08:30:22.123567 4940 scope.go:117] "RemoveContainer" containerID="d34e07ef5b4e8afc831783934aa9425877c18b85bddf0d04f5f444c8bbcb88b0"
Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.164265 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4"}
"SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4"} Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.170612 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85458b99fc-qbqch" event={"ID":"f29586f6-dfd2-4db1-a55e-b9919913eac4","Type":"ContainerStarted","Data":"9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552"} Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.177710 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.179976 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" event={"ID":"f87a3541-0420-4a4e-9019-720ef08d6fd8","Type":"ContainerStarted","Data":"91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9"} Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.195157 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" event={"ID":"ac982260-2b74-4742-9257-b82283d5a557","Type":"ContainerStarted","Data":"810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3"} Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.322764 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=1.6015812710000001 podStartE2EDuration="25.322724957s" podCreationTimestamp="2025-11-26 08:30:01 +0000 UTC" firstStartedPulling="2025-11-26 08:30:01.942800711 +0000 UTC m=+5703.462942330" lastFinishedPulling="2025-11-26 08:30:25.663944397 +0000 UTC m=+5727.184086016" observedRunningTime="2025-11-26 08:30:26.314291437 +0000 UTC m=+5727.834433066" watchObservedRunningTime="2025-11-26 08:30:26.322724957 +0000 UTC m=+5727.842866576" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.472681 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85458b99fc-qbqch" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.545411 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.619998 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f29586f6-dfd2-4db1-a55e-b9919913eac4-config\") pod \"f29586f6-dfd2-4db1-a55e-b9919913eac4\" (UID: \"f29586f6-dfd2-4db1-a55e-b9919913eac4\") " Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.620453 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-944c9\" (UniqueName: \"kubernetes.io/projected/f29586f6-dfd2-4db1-a55e-b9919913eac4-kube-api-access-944c9\") pod \"f29586f6-dfd2-4db1-a55e-b9919913eac4\" (UID: \"f29586f6-dfd2-4db1-a55e-b9919913eac4\") " Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.628977 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f29586f6-dfd2-4db1-a55e-b9919913eac4-kube-api-access-944c9" (OuterVolumeSpecName: "kube-api-access-944c9") pod "f29586f6-dfd2-4db1-a55e-b9919913eac4" (UID: "f29586f6-dfd2-4db1-a55e-b9919913eac4"). InnerVolumeSpecName "kube-api-access-944c9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.644866 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f29586f6-dfd2-4db1-a55e-b9919913eac4-config" (OuterVolumeSpecName: "config") pod "f29586f6-dfd2-4db1-a55e-b9919913eac4" (UID: "f29586f6-dfd2-4db1-a55e-b9919913eac4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.721279 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-dns-svc\") pod \"f87a3541-0420-4a4e-9019-720ef08d6fd8\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.721468 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-config\") pod \"f87a3541-0420-4a4e-9019-720ef08d6fd8\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.721511 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ntvf\" (UniqueName: \"kubernetes.io/projected/f87a3541-0420-4a4e-9019-720ef08d6fd8-kube-api-access-5ntvf\") pod \"f87a3541-0420-4a4e-9019-720ef08d6fd8\" (UID: \"f87a3541-0420-4a4e-9019-720ef08d6fd8\") " Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.721821 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-944c9\" (UniqueName: \"kubernetes.io/projected/f29586f6-dfd2-4db1-a55e-b9919913eac4-kube-api-access-944c9\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.721838 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f29586f6-dfd2-4db1-a55e-b9919913eac4-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.780231 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f87a3541-0420-4a4e-9019-720ef08d6fd8-kube-api-access-5ntvf" (OuterVolumeSpecName: "kube-api-access-5ntvf") pod "f87a3541-0420-4a4e-9019-720ef08d6fd8" (UID: "f87a3541-0420-4a4e-9019-720ef08d6fd8"). InnerVolumeSpecName "kube-api-access-5ntvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.823124 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ntvf\" (UniqueName: \"kubernetes.io/projected/f87a3541-0420-4a4e-9019-720ef08d6fd8-kube-api-access-5ntvf\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.912859 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-config" (OuterVolumeSpecName: "config") pod "f87a3541-0420-4a4e-9019-720ef08d6fd8" (UID: "f87a3541-0420-4a4e-9019-720ef08d6fd8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:30:26 crc kubenswrapper[4940]: I1126 08:30:26.924553 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.092031 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f87a3541-0420-4a4e-9019-720ef08d6fd8" (UID: "f87a3541-0420-4a4e-9019-720ef08d6fd8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.127991 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f87a3541-0420-4a4e-9019-720ef08d6fd8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.207454 4940 generic.go:334] "Generic (PLEG): container finished" podID="35c9d53f-c724-40f1-9af3-d1cb1f549849" containerID="b8c6c19257d4bcb96a7e7e2b3046586cb626e493de17619ec5c01e9872efe0b5" exitCode=0 Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.207556 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" event={"ID":"35c9d53f-c724-40f1-9af3-d1cb1f549849","Type":"ContainerDied","Data":"b8c6c19257d4bcb96a7e7e2b3046586cb626e493de17619ec5c01e9872efe0b5"} Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.210468 4940 generic.go:334] "Generic (PLEG): container finished" podID="f29586f6-dfd2-4db1-a55e-b9919913eac4" containerID="9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552" exitCode=0 Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.210541 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85458b99fc-qbqch" event={"ID":"f29586f6-dfd2-4db1-a55e-b9919913eac4","Type":"ContainerDied","Data":"9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552"} Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.210576 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85458b99fc-qbqch" event={"ID":"f29586f6-dfd2-4db1-a55e-b9919913eac4","Type":"ContainerDied","Data":"8c58389479e0c9ce1bcd23aedbff0b4aec1b5fcda78d2dcca8053667ad161bab"} Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.210596 4940 scope.go:117] "RemoveContainer" containerID="9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552" Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.210720 4940 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.215908 4940 generic.go:334] "Generic (PLEG): container finished" podID="ac982260-2b74-4742-9257-b82283d5a557" containerID="810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3" exitCode=0
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.215981 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" event={"ID":"ac982260-2b74-4742-9257-b82283d5a557","Type":"ContainerDied","Data":"810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3"}
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.228069 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"59fdd492-69d6-4325-9c7f-ed4622d6797b","Type":"ContainerStarted","Data":"faa7d59c0c6cfff68f4b5085fb34032cd73f1c41bc6dcf99a686b166610be91e"}
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.230001 4940 generic.go:334] "Generic (PLEG): container finished" podID="f87a3541-0420-4a4e-9019-720ef08d6fd8" containerID="91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9" exitCode=0
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.230117 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd"
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.230142 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" event={"ID":"f87a3541-0420-4a4e-9019-720ef08d6fd8","Type":"ContainerDied","Data":"91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9"}
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.230945 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-997dbfdb7-kzkhd" event={"ID":"f87a3541-0420-4a4e-9019-720ef08d6fd8","Type":"ContainerDied","Data":"60daf56d5cb162e5167c13c2d466ca1da8fbf53dba7d040094f42f668c459826"}
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.232315 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c03b9b4d-9923-4534-94d4-00e6eee88f27","Type":"ContainerStarted","Data":"819aa4b9326d9aa021fe25bd614812cf059896d8771ea6c28a58732910caa4fb"}
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.236122 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"262e1270-2fbd-472b-bbd0-680f16ee060f","Type":"ContainerStarted","Data":"e65bdcb7c413ceed19e2bb15de107e83e6725a3505903b3534c005229b9bb8e9"}
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.257140 4940 scope.go:117] "RemoveContainer" containerID="9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552"
Nov 26 08:30:27 crc kubenswrapper[4940]: E1126 08:30:27.257777 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552\": container with ID starting with 9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552 not found: ID does not exist" containerID="9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552"
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.257840 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552"} err="failed to get container status \"9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552\": rpc error: code = NotFound desc = could not find container \"9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552\": container with ID starting with 9119e087754c20d77380e1d20801dfde1bc70e80f76537f9b24cee927d798552 not found: ID does not exist"
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.257875 4940 scope.go:117] "RemoveContainer" containerID="91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9"
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.326121 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85458b99fc-qbqch"]
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.329353 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85458b99fc-qbqch"]
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.370025 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-997dbfdb7-kzkhd"]
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.384066 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-997dbfdb7-kzkhd"]
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.416439 4940 scope.go:117] "RemoveContainer" containerID="91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9"
Nov 26 08:30:27 crc kubenswrapper[4940]: E1126 08:30:27.417155 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9\": container with ID starting with 91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9 not found: ID does not exist" containerID="91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9"
Nov 26 08:30:27 crc kubenswrapper[4940]: I1126 08:30:27.417218 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9"} err="failed to get container status \"91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9\": rpc error: code = NotFound desc = could not find container \"91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9\": container with ID starting with 91ea7024a5b5c96787aea432671b36b56713852d7eb1de5e9124a41b300b4de9 not found: ID does not exist"
Nov 26 08:30:28 crc kubenswrapper[4940]: I1126 08:30:28.248524 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21","Type":"ContainerStarted","Data":"93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d"}
Nov 26 08:30:28 crc kubenswrapper[4940]: I1126 08:30:28.251626 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" event={"ID":"35c9d53f-c724-40f1-9af3-d1cb1f549849","Type":"ContainerStarted","Data":"62600d7c2a61674dcabf1bda9110796a913f0e12cc119da1c0885ca8b1a0bbba"}
Nov 26 08:30:28 crc kubenswrapper[4940]: I1126 08:30:28.252358 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk"
Nov 26 08:30:28 crc kubenswrapper[4940]: I1126 08:30:28.256602 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" event={"ID":"ac982260-2b74-4742-9257-b82283d5a557","Type":"ContainerStarted","Data":"ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71"}
Nov 26 08:30:28 crc kubenswrapper[4940]: I1126 08:30:28.256724 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk"
Nov 26 08:30:28 crc kubenswrapper[4940]: I1126 08:30:28.258007 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5e6166c4-8b55-4937-ad33-b83bae8213d1","Type":"ContainerStarted","Data":"f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d"}
Nov 26 08:30:28 crc kubenswrapper[4940]: I1126 08:30:28.279879 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" podStartSLOduration=3.901237294 podStartE2EDuration="30.279852321s" podCreationTimestamp="2025-11-26 08:29:58 +0000 UTC" firstStartedPulling="2025-11-26 08:29:59.386425079 +0000 UTC m=+5700.906566698" lastFinishedPulling="2025-11-26 08:30:25.765040106 +0000 UTC m=+5727.285181725" observedRunningTime="2025-11-26 08:30:28.271591877 +0000 UTC m=+5729.791733516" watchObservedRunningTime="2025-11-26 08:30:28.279852321 +0000 UTC m=+5729.799993960"
Nov 26 08:30:28 crc kubenswrapper[4940]: I1126 08:30:28.294443 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" podStartSLOduration=3.829010496 podStartE2EDuration="30.294421566s" podCreationTimestamp="2025-11-26 08:29:58 +0000 UTC" firstStartedPulling="2025-11-26 08:29:59.230175868 +0000 UTC m=+5700.750317487" lastFinishedPulling="2025-11-26 08:30:25.695586938 +0000 UTC m=+5727.215728557" observedRunningTime="2025-11-26 08:30:28.287837986 +0000 UTC m=+5729.807979645" watchObservedRunningTime="2025-11-26 08:30:28.294421566 +0000 UTC m=+5729.814563195"
Nov 26 08:30:29 crc kubenswrapper[4940]: I1126 08:30:29.174386 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f29586f6-dfd2-4db1-a55e-b9919913eac4" path="/var/lib/kubelet/pods/f29586f6-dfd2-4db1-a55e-b9919913eac4/volumes"
Nov 26 08:30:29 crc kubenswrapper[4940]: I1126 08:30:29.174910 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f87a3541-0420-4a4e-9019-720ef08d6fd8" path="/var/lib/kubelet/pods/f87a3541-0420-4a4e-9019-720ef08d6fd8/volumes"
Nov 26 08:30:30 crc kubenswrapper[4940]: I1126 08:30:30.280286 4940 generic.go:334] "Generic (PLEG): container finished" podID="262e1270-2fbd-472b-bbd0-680f16ee060f" containerID="e65bdcb7c413ceed19e2bb15de107e83e6725a3505903b3534c005229b9bb8e9" exitCode=0
Nov 26 08:30:30 crc kubenswrapper[4940]: I1126 08:30:30.280387 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"262e1270-2fbd-472b-bbd0-680f16ee060f","Type":"ContainerDied","Data":"e65bdcb7c413ceed19e2bb15de107e83e6725a3505903b3534c005229b9bb8e9"}
Nov 26 08:30:30 crc kubenswrapper[4940]: I1126 08:30:30.285145 4940 generic.go:334] "Generic (PLEG): container finished" podID="c03b9b4d-9923-4534-94d4-00e6eee88f27" containerID="819aa4b9326d9aa021fe25bd614812cf059896d8771ea6c28a58732910caa4fb" exitCode=0
Nov 26 08:30:30 crc kubenswrapper[4940]: I1126 08:30:30.285179 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c03b9b4d-9923-4534-94d4-00e6eee88f27","Type":"ContainerDied","Data":"819aa4b9326d9aa021fe25bd614812cf059896d8771ea6c28a58732910caa4fb"}
Nov 26 08:30:31 crc kubenswrapper[4940]: I1126 08:30:31.295701 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"262e1270-2fbd-472b-bbd0-680f16ee060f","Type":"ContainerStarted","Data":"d8025be943a6c82a90ec85ffcd78cbde05a3f492568497eb13f3d8142daa317a"}
Nov 26 08:30:31 crc kubenswrapper[4940]: I1126 08:30:31.299075 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c03b9b4d-9923-4534-94d4-00e6eee88f27","Type":"ContainerStarted","Data":"a44ef536395306ac361f919238c3a5467cb03d98627126b4afdf7f1dfa04a155"}
Nov 26 08:30:31 crc kubenswrapper[4940]: I1126 08:30:31.320819 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=7.420664923 podStartE2EDuration="30.320799029s" podCreationTimestamp="2025-11-26 08:30:01 +0000 UTC" firstStartedPulling="2025-11-26 08:30:02.85332598 +0000 UTC m=+5704.373467599" lastFinishedPulling="2025-11-26 08:30:25.753460086 +0000 UTC m=+5727.273601705" observedRunningTime="2025-11-26 08:30:31.315232951 +0000 UTC m=+5732.835374580" watchObservedRunningTime="2025-11-26 08:30:31.320799029 +0000 UTC m=+5732.840940648"
Nov 26 08:30:31 crc kubenswrapper[4940]: I1126 08:30:31.339526 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.376391001 podStartE2EDuration="32.339509616s" podCreationTimestamp="2025-11-26 08:29:59 +0000 UTC" firstStartedPulling="2025-11-26 08:30:01.69948716 +0000 UTC m=+5703.219628779" lastFinishedPulling="2025-11-26 08:30:25.662605775 +0000 UTC m=+5727.182747394" observedRunningTime="2025-11-26 08:30:31.33522741 +0000 UTC m=+5732.855369029" watchObservedRunningTime="2025-11-26 08:30:31.339509616 +0000 UTC m=+5732.859651235"
Nov 26 08:30:31 crc kubenswrapper[4940]: I1126 08:30:31.433846 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 26 08:30:32 crc kubenswrapper[4940]: I1126 08:30:32.353499 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:32 crc kubenswrapper[4940]: I1126 08:30:32.353900 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:33 crc kubenswrapper[4940]: I1126 08:30:33.620304 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk"
Nov 26 08:30:33 crc kubenswrapper[4940]: I1126 08:30:33.930356 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk"
Nov 26 08:30:33 crc kubenswrapper[4940]: I1126 08:30:33.988160 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79d6ff5cc5-98lkk"]
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.320910 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" podUID="ac982260-2b74-4742-9257-b82283d5a557" containerName="dnsmasq-dns" containerID="cri-o://ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71" gracePeriod=10
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.713474 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk"
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.872198 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-dns-svc\") pod \"ac982260-2b74-4742-9257-b82283d5a557\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") "
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.872386 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spnpf\" (UniqueName: \"kubernetes.io/projected/ac982260-2b74-4742-9257-b82283d5a557-kube-api-access-spnpf\") pod \"ac982260-2b74-4742-9257-b82283d5a557\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") "
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.872435 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-config\") pod \"ac982260-2b74-4742-9257-b82283d5a557\" (UID: \"ac982260-2b74-4742-9257-b82283d5a557\") "
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.886332 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac982260-2b74-4742-9257-b82283d5a557-kube-api-access-spnpf" (OuterVolumeSpecName: "kube-api-access-spnpf") pod "ac982260-2b74-4742-9257-b82283d5a557" (UID: "ac982260-2b74-4742-9257-b82283d5a557"). InnerVolumeSpecName "kube-api-access-spnpf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.910796 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-config" (OuterVolumeSpecName: "config") pod "ac982260-2b74-4742-9257-b82283d5a557" (UID: "ac982260-2b74-4742-9257-b82283d5a557"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.910809 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ac982260-2b74-4742-9257-b82283d5a557" (UID: "ac982260-2b74-4742-9257-b82283d5a557"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.974559 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.974602 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spnpf\" (UniqueName: \"kubernetes.io/projected/ac982260-2b74-4742-9257-b82283d5a557-kube-api-access-spnpf\") on node \"crc\" DevicePath \"\""
Nov 26 08:30:34 crc kubenswrapper[4940]: I1126 08:30:34.974616 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac982260-2b74-4742-9257-b82283d5a557-config\") on node \"crc\" DevicePath \"\""
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.344111 4940 generic.go:334] "Generic (PLEG): container finished" podID="ac982260-2b74-4742-9257-b82283d5a557" containerID="ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71" exitCode=0
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.344189 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk"
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.344235 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" event={"ID":"ac982260-2b74-4742-9257-b82283d5a557","Type":"ContainerDied","Data":"ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71"}
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.344647 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79d6ff5cc5-98lkk" event={"ID":"ac982260-2b74-4742-9257-b82283d5a557","Type":"ContainerDied","Data":"56860c5cd895a0bd6b7994443e22543f7899f4930461798d0fbfc414e4f29625"}
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.344679 4940 scope.go:117] "RemoveContainer" containerID="ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71"
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.383507 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79d6ff5cc5-98lkk"]
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.388785 4940 scope.go:117] "RemoveContainer" containerID="810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3"
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.392425 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79d6ff5cc5-98lkk"]
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.415644 4940 scope.go:117] "RemoveContainer" containerID="ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71"
Nov 26 08:30:35 crc kubenswrapper[4940]: E1126 08:30:35.416171 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71\": container with ID starting with ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71 not found: ID does not exist" containerID="ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71"
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.416310 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71"} err="failed to get container status \"ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71\": rpc error: code = NotFound desc = could not find container \"ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71\": container with ID starting with ac5fef855b8fbe7a30ff7519857644eda2921899dca9d5a76be8971f15045b71 not found: ID does not exist"
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.416410 4940 scope.go:117] "RemoveContainer" containerID="810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3"
Nov 26 08:30:35 crc kubenswrapper[4940]: E1126 08:30:35.417080 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3\": container with ID starting with 810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3 not found: ID does not exist" containerID="810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3"
Nov 26 08:30:35 crc kubenswrapper[4940]: I1126 08:30:35.417118 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3"} err="failed to get container status \"810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3\": rpc error: code = NotFound desc = could not find container \"810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3\": container with ID starting with 810e5455f198015b47b6ccc3b113f022a58035043ee83c395c3e9b4df69281c3 not found: ID does not exist"
Nov 26 08:30:36 crc kubenswrapper[4940]: I1126 08:30:36.876369 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:36 crc kubenswrapper[4940]: I1126 08:30:36.949929 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 26 08:30:37 crc kubenswrapper[4940]: I1126 08:30:37.175982 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac982260-2b74-4742-9257-b82283d5a557" path="/var/lib/kubelet/pods/ac982260-2b74-4742-9257-b82283d5a557/volumes"
Nov 26 08:30:40 crc kubenswrapper[4940]: I1126 08:30:40.967680 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 26 08:30:40 crc kubenswrapper[4940]: I1126 08:30:40.968460 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 26 08:30:41 crc kubenswrapper[4940]: I1126 08:30:41.069387 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 26 08:30:41 crc kubenswrapper[4940]: I1126 08:30:41.488097 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 26 08:30:59 crc kubenswrapper[4940]: I1126 08:30:59.565572 4940 generic.go:334] "Generic (PLEG): container finished" podID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerID="f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d" exitCode=0
Nov 26 08:30:59 crc kubenswrapper[4940]: I1126 08:30:59.565709 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5e6166c4-8b55-4937-ad33-b83bae8213d1","Type":"ContainerDied","Data":"f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d"}
Nov 26 08:30:59 crc kubenswrapper[4940]: I1126 08:30:59.568257 4940 generic.go:334] "Generic (PLEG): container finished"
podID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerID="93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d" exitCode=0 Nov 26 08:30:59 crc kubenswrapper[4940]: I1126 08:30:59.568321 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21","Type":"ContainerDied","Data":"93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d"} Nov 26 08:31:00 crc kubenswrapper[4940]: I1126 08:31:00.579520 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5e6166c4-8b55-4937-ad33-b83bae8213d1","Type":"ContainerStarted","Data":"610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf"} Nov 26 08:31:00 crc kubenswrapper[4940]: I1126 08:31:00.580240 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:00 crc kubenswrapper[4940]: I1126 08:31:00.582694 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21","Type":"ContainerStarted","Data":"78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d"} Nov 26 08:31:00 crc kubenswrapper[4940]: I1126 08:31:00.583337 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 08:31:00 crc kubenswrapper[4940]: I1126 08:31:00.605878 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.529906242 podStartE2EDuration="1m2.605860026s" podCreationTimestamp="2025-11-26 08:29:58 +0000 UTC" firstStartedPulling="2025-11-26 08:30:00.58725365 +0000 UTC m=+5702.107395269" lastFinishedPulling="2025-11-26 08:30:25.663207434 +0000 UTC m=+5727.183349053" observedRunningTime="2025-11-26 08:31:00.604526904 +0000 UTC m=+5762.124668533" watchObservedRunningTime="2025-11-26 08:31:00.605860026 +0000 UTC m=+5762.126001655" Nov 26 08:31:00 crc kubenswrapper[4940]: I1126 08:31:00.633263 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.314055268 podStartE2EDuration="1m2.63324285s" podCreationTimestamp="2025-11-26 08:29:58 +0000 UTC" firstStartedPulling="2025-11-26 08:30:00.344050013 +0000 UTC m=+5701.864191632" lastFinishedPulling="2025-11-26 08:30:25.663237595 +0000 UTC m=+5727.183379214" observedRunningTime="2025-11-26 08:31:00.631078362 +0000 UTC m=+5762.151220001" watchObservedRunningTime="2025-11-26 08:31:00.63324285 +0000 UTC m=+5762.153384479" Nov 26 08:31:09 crc kubenswrapper[4940]: I1126 08:31:09.766938 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 08:31:10 crc kubenswrapper[4940]: I1126 08:31:10.066058 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.104086 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fc86c8ff7-cfdk6"] Nov 26 08:31:16 crc kubenswrapper[4940]: E1126 08:31:16.105084 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f29586f6-dfd2-4db1-a55e-b9919913eac4" containerName="init" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105106 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f29586f6-dfd2-4db1-a55e-b9919913eac4" containerName="init" Nov 26 08:31:16 crc kubenswrapper[4940]: 
E1126 08:31:16.105137 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac982260-2b74-4742-9257-b82283d5a557" containerName="dnsmasq-dns" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105150 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac982260-2b74-4742-9257-b82283d5a557" containerName="dnsmasq-dns" Nov 26 08:31:16 crc kubenswrapper[4940]: E1126 08:31:16.105169 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac982260-2b74-4742-9257-b82283d5a557" containerName="init" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105180 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac982260-2b74-4742-9257-b82283d5a557" containerName="init" Nov 26 08:31:16 crc kubenswrapper[4940]: E1126 08:31:16.105204 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5" containerName="collect-profiles" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105216 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5" containerName="collect-profiles" Nov 26 08:31:16 crc kubenswrapper[4940]: E1126 08:31:16.105241 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87a3541-0420-4a4e-9019-720ef08d6fd8" containerName="init" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105252 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87a3541-0420-4a4e-9019-720ef08d6fd8" containerName="init" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105453 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87a3541-0420-4a4e-9019-720ef08d6fd8" containerName="init" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105485 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f29586f6-dfd2-4db1-a55e-b9919913eac4" containerName="init" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105499 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac982260-2b74-4742-9257-b82283d5a557" containerName="dnsmasq-dns" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.105516 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5" containerName="collect-profiles" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.106747 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.118792 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fc86c8ff7-cfdk6"] Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.244622 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-config\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.244708 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x76l4\" (UniqueName: \"kubernetes.io/projected/0624efda-c161-496f-9bbb-8519e74d058d-kube-api-access-x76l4\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.244877 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-dns-svc\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.347097 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x76l4\" (UniqueName: \"kubernetes.io/projected/0624efda-c161-496f-9bbb-8519e74d058d-kube-api-access-x76l4\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.347259 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-dns-svc\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.347589 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-config\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.348443 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-dns-svc\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.349273 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-config\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.368198 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x76l4\" (UniqueName: 
\"kubernetes.io/projected/0624efda-c161-496f-9bbb-8519e74d058d-kube-api-access-x76l4\") pod \"dnsmasq-dns-5fc86c8ff7-cfdk6\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.442785 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.713959 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 08:31:16 crc kubenswrapper[4940]: I1126 08:31:16.902736 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fc86c8ff7-cfdk6"] Nov 26 08:31:17 crc kubenswrapper[4940]: I1126 08:31:17.432596 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 08:31:17 crc kubenswrapper[4940]: I1126 08:31:17.730416 4940 generic.go:334] "Generic (PLEG): container finished" podID="0624efda-c161-496f-9bbb-8519e74d058d" containerID="9840f5e1bb19e9222e69be48e81b621c2cdc8ec3c4ec3d67a29caf795d6e0f26" exitCode=0 Nov 26 08:31:17 crc kubenswrapper[4940]: I1126 08:31:17.730473 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" event={"ID":"0624efda-c161-496f-9bbb-8519e74d058d","Type":"ContainerDied","Data":"9840f5e1bb19e9222e69be48e81b621c2cdc8ec3c4ec3d67a29caf795d6e0f26"} Nov 26 08:31:17 crc kubenswrapper[4940]: I1126 08:31:17.730507 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" event={"ID":"0624efda-c161-496f-9bbb-8519e74d058d","Type":"ContainerStarted","Data":"0dfa3e481af58ff24de99e16275c17b4d8401ff77a0c42c31db7ec57f3e40f48"} Nov 26 08:31:18 crc kubenswrapper[4940]: I1126 08:31:18.644641 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerName="rabbitmq" containerID="cri-o://78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d" gracePeriod=604799 Nov 26 08:31:18 crc kubenswrapper[4940]: I1126 08:31:18.742823 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" event={"ID":"0624efda-c161-496f-9bbb-8519e74d058d","Type":"ContainerStarted","Data":"9a923e1676d6f2cc68099d52bf3e2b97e4464e20870896b82f86625fbb151cea"} Nov 26 08:31:18 crc kubenswrapper[4940]: I1126 08:31:18.743095 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:18 crc kubenswrapper[4940]: I1126 08:31:18.761178 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" podStartSLOduration=2.761154757 podStartE2EDuration="2.761154757s" podCreationTimestamp="2025-11-26 08:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:31:18.760603519 +0000 UTC m=+5780.280745138" watchObservedRunningTime="2025-11-26 08:31:18.761154757 +0000 UTC m=+5780.281296406" Nov 26 08:31:19 crc kubenswrapper[4940]: I1126 08:31:19.102917 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerName="rabbitmq" containerID="cri-o://610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf" gracePeriod=604799 Nov 26 08:31:19 crc 
kubenswrapper[4940]: I1126 08:31:19.765569 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.20:5672: connect: connection refused" Nov 26 08:31:20 crc kubenswrapper[4940]: I1126 08:31:20.064833 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.21:5672: connect: connection refused" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.306298 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.492717 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-server-conf\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.493102 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-pod-info\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.493139 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-plugins\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.493340 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.493370 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-confd\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.493396 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-erlang-cookie-secret\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.493416 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cdt7\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-kube-api-access-9cdt7\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.493454 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-erlang-cookie\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.493493 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-plugins-conf\") pod \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\" (UID: \"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.494266 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.496565 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.496592 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.502331 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-pod-info" (OuterVolumeSpecName: "pod-info") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.505511 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-kube-api-access-9cdt7" (OuterVolumeSpecName: "kube-api-access-9cdt7") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "kube-api-access-9cdt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.506545 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5" (OuterVolumeSpecName: "persistence") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.510186 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.517218 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-server-conf" (OuterVolumeSpecName: "server-conf") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.580967 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" (UID: "b0bd0bd0-8c2a-4913-b47e-b3efba90bc21"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.595591 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.595650 4940 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.595673 4940 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.595691 4940 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.595707 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.595782 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") on node \"crc\" " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.595803 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.599171 4940 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-erlang-cookie-secret\") on 
node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.599188 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cdt7\" (UniqueName: \"kubernetes.io/projected/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21-kube-api-access-9cdt7\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.617568 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.621912 4940 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.622359 4940 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5") on node "crc" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.699849 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-erlang-cookie\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.699956 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5e6166c4-8b55-4937-ad33-b83bae8213d1-erlang-cookie-secret\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.700014 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5e6166c4-8b55-4937-ad33-b83bae8213d1-pod-info\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.700073 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9chq\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-kube-api-access-v9chq\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.700111 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-plugins-conf\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.700307 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.700388 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-confd\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 
08:31:25.700437 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-plugins\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.700487 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-server-conf\") pod \"5e6166c4-8b55-4937-ad33-b83bae8213d1\" (UID: \"5e6166c4-8b55-4937-ad33-b83bae8213d1\") " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.700719 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.701081 4940 reconciler_common.go:293] "Volume detached for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.701106 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.701789 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.703112 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.709564 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e6166c4-8b55-4937-ad33-b83bae8213d1-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.715493 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/5e6166c4-8b55-4937-ad33-b83bae8213d1-pod-info" (OuterVolumeSpecName: "pod-info") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.715671 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-kube-api-access-v9chq" (OuterVolumeSpecName: "kube-api-access-v9chq") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "kube-api-access-v9chq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.723223 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80" (OuterVolumeSpecName: "persistence") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.727203 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-server-conf" (OuterVolumeSpecName: "server-conf") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.776667 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "5e6166c4-8b55-4937-ad33-b83bae8213d1" (UID: "5e6166c4-8b55-4937-ad33-b83bae8213d1"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.802370 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.802407 4940 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5e6166c4-8b55-4937-ad33-b83bae8213d1-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.802421 4940 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.802429 4940 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5e6166c4-8b55-4937-ad33-b83bae8213d1-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.802439 4940 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5e6166c4-8b55-4937-ad33-b83bae8213d1-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.802450 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9chq\" (UniqueName: \"kubernetes.io/projected/5e6166c4-8b55-4937-ad33-b83bae8213d1-kube-api-access-v9chq\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.802459 4940 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5e6166c4-8b55-4937-ad33-b83bae8213d1-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.802495 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") on node \"crc\" " Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.818906 4940 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.819144 4940 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80") on node "crc" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.822983 4940 generic.go:334] "Generic (PLEG): container finished" podID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerID="78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d" exitCode=0 Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.823074 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21","Type":"ContainerDied","Data":"78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d"} Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.823296 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b0bd0bd0-8c2a-4913-b47e-b3efba90bc21","Type":"ContainerDied","Data":"716f21081aa9c7f089b23ba8a7617fed38de1a0c60a785f5440db4a9670d5c01"} Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.823318 4940 scope.go:117] "RemoveContainer" containerID="78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.823408 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.826236 4940 generic.go:334] "Generic (PLEG): container finished" podID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerID="610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf" exitCode=0 Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.826281 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5e6166c4-8b55-4937-ad33-b83bae8213d1","Type":"ContainerDied","Data":"610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf"} Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.826312 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5e6166c4-8b55-4937-ad33-b83bae8213d1","Type":"ContainerDied","Data":"915a91974426f03b74fe34a588cc28d59752fc462deb33eff4d1fd2778360913"} Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.826329 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.845510 4940 scope.go:117] "RemoveContainer" containerID="93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.857488 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.863877 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.879407 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.886866 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.897262 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 08:31:25 crc kubenswrapper[4940]: E1126 08:31:25.897721 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerName="setup-container" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.897748 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerName="setup-container" Nov 26 08:31:25 crc kubenswrapper[4940]: E1126 08:31:25.897773 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerName="rabbitmq" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.897782 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerName="rabbitmq" Nov 26 08:31:25 crc kubenswrapper[4940]: E1126 08:31:25.897803 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerName="setup-container" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.897811 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerName="setup-container" Nov 26 08:31:25 crc kubenswrapper[4940]: E1126 08:31:25.897832 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerName="rabbitmq" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.897838 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerName="rabbitmq" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.898084 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e6166c4-8b55-4937-ad33-b83bae8213d1" containerName="rabbitmq" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.898108 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" containerName="rabbitmq" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.899179 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.900794 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.901033 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.902150 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.902317 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.902531 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wh9lb" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.903590 4940 reconciler_common.go:293] "Volume detached for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.917212 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.921169 4940 scope.go:117] "RemoveContainer" containerID="78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d" Nov 26 08:31:25 crc kubenswrapper[4940]: E1126 08:31:25.922150 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d\": container with ID starting with 78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d not found: ID does not exist" containerID="78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.922181 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d"} err="failed to get container status \"78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d\": rpc error: code = NotFound desc = could not find container \"78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d\": container with ID starting with 78e4b2db41391506f88f1b932b0f44ea775234317db0565534ccc7c6b3250f9d not found: ID does not exist" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.922200 4940 scope.go:117] "RemoveContainer" containerID="93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.931158 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 08:31:25 crc kubenswrapper[4940]: E1126 08:31:25.939365 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d\": container with ID starting with 93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d not found: ID does not exist" containerID="93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.940691 4940 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d"} err="failed to get container status \"93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d\": rpc error: code = NotFound desc = could not find container \"93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d\": container with ID starting with 93233f55d629b23126f480632071e66bc074b4fb9f0867fd79ca82eaf615435d not found: ID does not exist" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.940824 4940 scope.go:117] "RemoveContainer" containerID="610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.943418 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.947148 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.951433 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.951667 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-zp4dl" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.951865 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.952400 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.970024 4940 scope.go:117] "RemoveContainer" containerID="f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.977431 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.991774 4940 scope.go:117] "RemoveContainer" containerID="610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf" Nov 26 08:31:25 crc kubenswrapper[4940]: E1126 08:31:25.992227 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf\": container with ID starting with 610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf not found: ID does not exist" containerID="610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.992299 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf"} err="failed to get container status \"610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf\": rpc error: code = NotFound desc = could not find container \"610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf\": container with ID starting with 610d169d35a77b05ebfe79253daf458abbcf10d160696b9ee7aa2840bb1fe3bf not found: ID does not exist" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.992330 4940 scope.go:117] "RemoveContainer" containerID="f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d" Nov 26 08:31:25 crc kubenswrapper[4940]: E1126 08:31:25.992849 4940 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d\": container with ID starting with f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d not found: ID does not exist" containerID="f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d" Nov 26 08:31:25 crc kubenswrapper[4940]: I1126 08:31:25.992882 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d"} err="failed to get container status \"f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d\": rpc error: code = NotFound desc = could not find container \"f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d\": container with ID starting with f020b9f009a1c1cf66607e1e9711c8964dbefc0705555da3fb5a74a275ac350d not found: ID does not exist" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.004660 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.004714 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.004813 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0981b326-3444-45d7-b19d-5a33f431bf84-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.004862 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.004889 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0981b326-3444-45d7-b19d-5a33f431bf84-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.004957 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0981b326-3444-45d7-b19d-5a33f431bf84-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.004993 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/0981b326-3444-45d7-b19d-5a33f431bf84-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.005023 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxwh2\" (UniqueName: \"kubernetes.io/projected/0981b326-3444-45d7-b19d-5a33f431bf84-kube-api-access-zxwh2\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.005094 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.106323 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2r2k\" (UniqueName: \"kubernetes.io/projected/2a38e5e7-6d1d-4906-a469-2103514fc67b-kube-api-access-m2r2k\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.106645 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.106753 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.106844 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0981b326-3444-45d7-b19d-5a33f431bf84-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.106936 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107022 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0981b326-3444-45d7-b19d-5a33f431bf84-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107140 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107226 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107307 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2a38e5e7-6d1d-4906-a469-2103514fc67b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107449 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0981b326-3444-45d7-b19d-5a33f431bf84-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107506 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2a38e5e7-6d1d-4906-a469-2103514fc67b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107547 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107602 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2a38e5e7-6d1d-4906-a469-2103514fc67b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107631 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0981b326-3444-45d7-b19d-5a33f431bf84-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107660 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2a38e5e7-6d1d-4906-a469-2103514fc67b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107694 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxwh2\" (UniqueName: 
\"kubernetes.io/projected/0981b326-3444-45d7-b19d-5a33f431bf84-kube-api-access-zxwh2\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107714 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.107788 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.108223 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.108312 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.108904 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0981b326-3444-45d7-b19d-5a33f431bf84-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.108995 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0981b326-3444-45d7-b19d-5a33f431bf84-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.110920 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.111275 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a130cb460f4e43bd19fda2daf527cebbfd1afde2c00b1f7cb08238b8ce0d844b/globalmount\"" pod="openstack/rabbitmq-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.113317 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0981b326-3444-45d7-b19d-5a33f431bf84-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.113360 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0981b326-3444-45d7-b19d-5a33f431bf84-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.114791 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0981b326-3444-45d7-b19d-5a33f431bf84-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.132106 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxwh2\" (UniqueName: \"kubernetes.io/projected/0981b326-3444-45d7-b19d-5a33f431bf84-kube-api-access-zxwh2\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.135668 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b2dc300-7a0a-480c-af96-fdc783acdff5\") pod \"rabbitmq-server-0\" (UID: \"0981b326-3444-45d7-b19d-5a33f431bf84\") " pod="openstack/rabbitmq-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210097 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2a38e5e7-6d1d-4906-a469-2103514fc67b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210189 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210255 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2r2k\" (UniqueName: \"kubernetes.io/projected/2a38e5e7-6d1d-4906-a469-2103514fc67b-kube-api-access-m2r2k\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210347 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210375 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210433 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2a38e5e7-6d1d-4906-a469-2103514fc67b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210460 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2a38e5e7-6d1d-4906-a469-2103514fc67b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210491 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.210515 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2a38e5e7-6d1d-4906-a469-2103514fc67b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.211240 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.211367 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.211786 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2a38e5e7-6d1d-4906-a469-2103514fc67b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.212792 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2a38e5e7-6d1d-4906-a469-2103514fc67b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.214176 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.214250 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0fcd7e7869c9fa2f5307e2c7a2c3dbb3b1371b022ffbf50bc645221fc2bfa67d/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.214352 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2a38e5e7-6d1d-4906-a469-2103514fc67b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.215359 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2a38e5e7-6d1d-4906-a469-2103514fc67b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.218010 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2a38e5e7-6d1d-4906-a469-2103514fc67b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.243531 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2r2k\" (UniqueName: \"kubernetes.io/projected/2a38e5e7-6d1d-4906-a469-2103514fc67b-kube-api-access-m2r2k\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.256727 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.264665 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f2b701d2-b22f-4007-aa85-521fc6d9de80\") pod \"rabbitmq-cell1-server-0\" (UID: \"2a38e5e7-6d1d-4906-a469-2103514fc67b\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.269914 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
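Annotation (not log output): the VerifyControllerAttachedVolume -> MountVolume -> "MountVolume.SetUp succeeded" runs above are one pass of the kubelet volume manager's reconciler, which diffs a desired state ("this pod needs these volumes") against an actual state ("these are mounted") and issues mounts for the difference. A toy sketch of that shape; the types here are illustrative, not the kubelet's:

package kubeletnotes

// volumeKey identifies one (pod, volume) pair in the desired/actual maps.
type volumeKey struct{ podUID, volume string }

// reconcile mounts every desired volume that is not yet in the actual state.
// Once a mount succeeds the actual state is updated, so subsequent passes
// become no-ops and the log goes quiet, as it does above once every
// volume has reported "SetUp succeeded".
func reconcile(desired, actual map[volumeKey]bool, mount func(volumeKey) error) {
	for k := range desired {
		if !actual[k] {
			if err := mount(k); err == nil {
				actual[k] = true // state converged for this volume
			}
		}
	}
}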
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.445527 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.523326 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6849754c6c-pvdtk"] Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.523760 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" podUID="35c9d53f-c724-40f1-9af3-d1cb1f549849" containerName="dnsmasq-dns" containerID="cri-o://62600d7c2a61674dcabf1bda9110796a913f0e12cc119da1c0885ca8b1a0bbba" gracePeriod=10 Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.800358 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.846670 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.859735 4940 generic.go:334] "Generic (PLEG): container finished" podID="35c9d53f-c724-40f1-9af3-d1cb1f549849" containerID="62600d7c2a61674dcabf1bda9110796a913f0e12cc119da1c0885ca8b1a0bbba" exitCode=0 Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.859854 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" event={"ID":"35c9d53f-c724-40f1-9af3-d1cb1f549849","Type":"ContainerDied","Data":"62600d7c2a61674dcabf1bda9110796a913f0e12cc119da1c0885ca8b1a0bbba"} Nov 26 08:31:26 crc kubenswrapper[4940]: I1126 08:31:26.860848 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0981b326-3444-45d7-b19d-5a33f431bf84","Type":"ContainerStarted","Data":"dd149bea2462a90d9a759ab476479e67c60e5f47da5fe2656a173191da295ee1"} Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.094352 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.174307 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e6166c4-8b55-4937-ad33-b83bae8213d1" path="/var/lib/kubelet/pods/5e6166c4-8b55-4937-ad33-b83bae8213d1/volumes" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.175194 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0bd0bd0-8c2a-4913-b47e-b3efba90bc21" path="/var/lib/kubelet/pods/b0bd0bd0-8c2a-4913-b47e-b3efba90bc21/volumes" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.233779 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-config\") pod \"35c9d53f-c724-40f1-9af3-d1cb1f549849\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.233930 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-68zl8\" (UniqueName: \"kubernetes.io/projected/35c9d53f-c724-40f1-9af3-d1cb1f549849-kube-api-access-68zl8\") pod \"35c9d53f-c724-40f1-9af3-d1cb1f549849\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.234003 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-dns-svc\") pod \"35c9d53f-c724-40f1-9af3-d1cb1f549849\" (UID: \"35c9d53f-c724-40f1-9af3-d1cb1f549849\") " Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.237565 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35c9d53f-c724-40f1-9af3-d1cb1f549849-kube-api-access-68zl8" (OuterVolumeSpecName: "kube-api-access-68zl8") pod "35c9d53f-c724-40f1-9af3-d1cb1f549849" (UID: "35c9d53f-c724-40f1-9af3-d1cb1f549849"). InnerVolumeSpecName "kube-api-access-68zl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.272926 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-config" (OuterVolumeSpecName: "config") pod "35c9d53f-c724-40f1-9af3-d1cb1f549849" (UID: "35c9d53f-c724-40f1-9af3-d1cb1f549849"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.296288 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "35c9d53f-c724-40f1-9af3-d1cb1f549849" (UID: "35c9d53f-c724-40f1-9af3-d1cb1f549849"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.336179 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.336206 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-68zl8\" (UniqueName: \"kubernetes.io/projected/35c9d53f-c724-40f1-9af3-d1cb1f549849-kube-api-access-68zl8\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.336216 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35c9d53f-c724-40f1-9af3-d1cb1f549849-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.871317 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.871283 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6849754c6c-pvdtk" event={"ID":"35c9d53f-c724-40f1-9af3-d1cb1f549849","Type":"ContainerDied","Data":"eb0c20c00f404936ca2ad9aa0c38f0f4ebe9dc0b1fc56c5eabe15a0df72f316e"} Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.871508 4940 scope.go:117] "RemoveContainer" containerID="62600d7c2a61674dcabf1bda9110796a913f0e12cc119da1c0885ca8b1a0bbba" Nov 26 08:31:27 crc kubenswrapper[4940]: I1126 08:31:27.872793 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2a38e5e7-6d1d-4906-a469-2103514fc67b","Type":"ContainerStarted","Data":"741298f74bb2c54193e9d6a6113d7dd4dc58f8973c5829c0a32419d5062deb33"} Nov 26 08:31:28 crc kubenswrapper[4940]: I1126 08:31:28.115072 4940 scope.go:117] "RemoveContainer" containerID="b8c6c19257d4bcb96a7e7e2b3046586cb626e493de17619ec5c01e9872efe0b5" Nov 26 08:31:28 crc kubenswrapper[4940]: I1126 08:31:28.121130 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6849754c6c-pvdtk"] Nov 26 08:31:28 crc kubenswrapper[4940]: I1126 08:31:28.128516 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6849754c6c-pvdtk"] Nov 26 08:31:28 crc kubenswrapper[4940]: I1126 08:31:28.885616 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0981b326-3444-45d7-b19d-5a33f431bf84","Type":"ContainerStarted","Data":"615a4f69105c2cfe7577b88b39e945e29e5d592eefb178d45fb2de7860c62a3f"} Nov 26 08:31:28 crc kubenswrapper[4940]: I1126 08:31:28.888087 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2a38e5e7-6d1d-4906-a469-2103514fc67b","Type":"ContainerStarted","Data":"d0b9e8526a50fae0f45c403d37c29a0346b679b256e9de6af1d55d334b12ffd2"} Nov 26 08:31:29 crc kubenswrapper[4940]: I1126 08:31:29.176910 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35c9d53f-c724-40f1-9af3-d1cb1f549849" path="/var/lib/kubelet/pods/35c9d53f-c724-40f1-9af3-d1cb1f549849/volumes" Nov 26 08:32:01 crc kubenswrapper[4940]: I1126 08:32:01.208861 4940 generic.go:334] "Generic (PLEG): container finished" podID="0981b326-3444-45d7-b19d-5a33f431bf84" containerID="615a4f69105c2cfe7577b88b39e945e29e5d592eefb178d45fb2de7860c62a3f" exitCode=0 Nov 26 08:32:01 crc kubenswrapper[4940]: I1126 08:32:01.208938 4940 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0981b326-3444-45d7-b19d-5a33f431bf84","Type":"ContainerDied","Data":"615a4f69105c2cfe7577b88b39e945e29e5d592eefb178d45fb2de7860c62a3f"} Nov 26 08:32:01 crc kubenswrapper[4940]: I1126 08:32:01.211580 4940 generic.go:334] "Generic (PLEG): container finished" podID="2a38e5e7-6d1d-4906-a469-2103514fc67b" containerID="d0b9e8526a50fae0f45c403d37c29a0346b679b256e9de6af1d55d334b12ffd2" exitCode=0 Nov 26 08:32:01 crc kubenswrapper[4940]: I1126 08:32:01.211646 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2a38e5e7-6d1d-4906-a469-2103514fc67b","Type":"ContainerDied","Data":"d0b9e8526a50fae0f45c403d37c29a0346b679b256e9de6af1d55d334b12ffd2"} Nov 26 08:32:02 crc kubenswrapper[4940]: I1126 08:32:02.224111 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0981b326-3444-45d7-b19d-5a33f431bf84","Type":"ContainerStarted","Data":"541474594a3cbeac96544e29e54496b0c135818adc92173b75308300cb3df13e"} Nov 26 08:32:02 crc kubenswrapper[4940]: I1126 08:32:02.225367 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 08:32:02 crc kubenswrapper[4940]: I1126 08:32:02.226674 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2a38e5e7-6d1d-4906-a469-2103514fc67b","Type":"ContainerStarted","Data":"3d4da2f4db3d310358ada6ebcfc5844157e92e710b1f2fac6bc2566fcdf2ea08"} Nov 26 08:32:02 crc kubenswrapper[4940]: I1126 08:32:02.226910 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:32:02 crc kubenswrapper[4940]: I1126 08:32:02.255507 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.255488343 podStartE2EDuration="37.255488343s" podCreationTimestamp="2025-11-26 08:31:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:32:02.247935522 +0000 UTC m=+5823.768077181" watchObservedRunningTime="2025-11-26 08:32:02.255488343 +0000 UTC m=+5823.775629972" Nov 26 08:32:02 crc kubenswrapper[4940]: I1126 08:32:02.275218 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.275201763 podStartE2EDuration="37.275201763s" podCreationTimestamp="2025-11-26 08:31:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:32:02.271491934 +0000 UTC m=+5823.791633593" watchObservedRunningTime="2025-11-26 08:32:02.275201763 +0000 UTC m=+5823.795343382" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.161301 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pmzzl"] Nov 26 08:32:05 crc kubenswrapper[4940]: E1126 08:32:05.161827 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c9d53f-c724-40f1-9af3-d1cb1f549849" containerName="init" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.161839 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c9d53f-c724-40f1-9af3-d1cb1f549849" containerName="init" Nov 26 08:32:05 crc kubenswrapper[4940]: E1126 08:32:05.161865 4940 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="35c9d53f-c724-40f1-9af3-d1cb1f549849" containerName="dnsmasq-dns" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.161871 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c9d53f-c724-40f1-9af3-d1cb1f549849" containerName="dnsmasq-dns" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.162013 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c9d53f-c724-40f1-9af3-d1cb1f549849" containerName="dnsmasq-dns" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.163057 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.184953 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pmzzl"] Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.320599 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7sxl\" (UniqueName: \"kubernetes.io/projected/70586459-b4cc-46c5-a06e-44e3e075150d-kube-api-access-w7sxl\") pod \"certified-operators-pmzzl\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.320685 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-catalog-content\") pod \"certified-operators-pmzzl\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.320760 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-utilities\") pod \"certified-operators-pmzzl\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.422195 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7sxl\" (UniqueName: \"kubernetes.io/projected/70586459-b4cc-46c5-a06e-44e3e075150d-kube-api-access-w7sxl\") pod \"certified-operators-pmzzl\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.422600 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-catalog-content\") pod \"certified-operators-pmzzl\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.422833 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-utilities\") pod \"certified-operators-pmzzl\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.423292 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-catalog-content\") pod \"certified-operators-pmzzl\" 
(UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.423446 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-utilities\") pod \"certified-operators-pmzzl\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.447650 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7sxl\" (UniqueName: \"kubernetes.io/projected/70586459-b4cc-46c5-a06e-44e3e075150d-kube-api-access-w7sxl\") pod \"certified-operators-pmzzl\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:05 crc kubenswrapper[4940]: I1126 08:32:05.484360 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:06 crc kubenswrapper[4940]: I1126 08:32:06.042443 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pmzzl"] Nov 26 08:32:06 crc kubenswrapper[4940]: I1126 08:32:06.261027 4940 generic.go:334] "Generic (PLEG): container finished" podID="70586459-b4cc-46c5-a06e-44e3e075150d" containerID="a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b" exitCode=0 Nov 26 08:32:06 crc kubenswrapper[4940]: I1126 08:32:06.261088 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmzzl" event={"ID":"70586459-b4cc-46c5-a06e-44e3e075150d","Type":"ContainerDied","Data":"a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b"} Nov 26 08:32:06 crc kubenswrapper[4940]: I1126 08:32:06.261113 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmzzl" event={"ID":"70586459-b4cc-46c5-a06e-44e3e075150d","Type":"ContainerStarted","Data":"993ae43b50e7b97d740f878221cd96f7f638a98719e18091e0b9c899005f24ff"} Nov 26 08:32:07 crc kubenswrapper[4940]: I1126 08:32:07.271199 4940 generic.go:334] "Generic (PLEG): container finished" podID="70586459-b4cc-46c5-a06e-44e3e075150d" containerID="85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281" exitCode=0 Nov 26 08:32:07 crc kubenswrapper[4940]: I1126 08:32:07.271328 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmzzl" event={"ID":"70586459-b4cc-46c5-a06e-44e3e075150d","Type":"ContainerDied","Data":"85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281"} Nov 26 08:32:08 crc kubenswrapper[4940]: I1126 08:32:08.280630 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmzzl" event={"ID":"70586459-b4cc-46c5-a06e-44e3e075150d","Type":"ContainerStarted","Data":"d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784"} Nov 26 08:32:15 crc kubenswrapper[4940]: I1126 08:32:15.485067 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:15 crc kubenswrapper[4940]: I1126 08:32:15.485668 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:15 crc kubenswrapper[4940]: I1126 08:32:15.557999 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:15 crc kubenswrapper[4940]: I1126 08:32:15.575983 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pmzzl" podStartSLOduration=9.165378617 podStartE2EDuration="10.575962076s" podCreationTimestamp="2025-11-26 08:32:05 +0000 UTC" firstStartedPulling="2025-11-26 08:32:06.262421802 +0000 UTC m=+5827.782563421" lastFinishedPulling="2025-11-26 08:32:07.673005241 +0000 UTC m=+5829.193146880" observedRunningTime="2025-11-26 08:32:08.298312572 +0000 UTC m=+5829.818454201" watchObservedRunningTime="2025-11-26 08:32:15.575962076 +0000 UTC m=+5837.096103715" Nov 26 08:32:16 crc kubenswrapper[4940]: I1126 08:32:16.260841 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 08:32:16 crc kubenswrapper[4940]: I1126 08:32:16.273218 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 08:32:16 crc kubenswrapper[4940]: I1126 08:32:16.399473 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:16 crc kubenswrapper[4940]: I1126 08:32:16.453397 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pmzzl"] Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.361293 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pmzzl" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" containerName="registry-server" containerID="cri-o://d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784" gracePeriod=2 Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.739799 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.828914 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-utilities\") pod \"70586459-b4cc-46c5-a06e-44e3e075150d\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.829016 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7sxl\" (UniqueName: \"kubernetes.io/projected/70586459-b4cc-46c5-a06e-44e3e075150d-kube-api-access-w7sxl\") pod \"70586459-b4cc-46c5-a06e-44e3e075150d\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.829160 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-catalog-content\") pod \"70586459-b4cc-46c5-a06e-44e3e075150d\" (UID: \"70586459-b4cc-46c5-a06e-44e3e075150d\") " Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.838576 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-utilities" (OuterVolumeSpecName: "utilities") pod "70586459-b4cc-46c5-a06e-44e3e075150d" (UID: "70586459-b4cc-46c5-a06e-44e3e075150d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.852675 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70586459-b4cc-46c5-a06e-44e3e075150d-kube-api-access-w7sxl" (OuterVolumeSpecName: "kube-api-access-w7sxl") pod "70586459-b4cc-46c5-a06e-44e3e075150d" (UID: "70586459-b4cc-46c5-a06e-44e3e075150d"). InnerVolumeSpecName "kube-api-access-w7sxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.880390 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "70586459-b4cc-46c5-a06e-44e3e075150d" (UID: "70586459-b4cc-46c5-a06e-44e3e075150d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.931506 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.931562 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7sxl\" (UniqueName: \"kubernetes.io/projected/70586459-b4cc-46c5-a06e-44e3e075150d-kube-api-access-w7sxl\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:18 crc kubenswrapper[4940]: I1126 08:32:18.931581 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70586459-b4cc-46c5-a06e-44e3e075150d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.372442 4940 generic.go:334] "Generic (PLEG): container finished" podID="70586459-b4cc-46c5-a06e-44e3e075150d" containerID="d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784" exitCode=0 Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.372492 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmzzl" event={"ID":"70586459-b4cc-46c5-a06e-44e3e075150d","Type":"ContainerDied","Data":"d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784"} Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.372538 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmzzl" event={"ID":"70586459-b4cc-46c5-a06e-44e3e075150d","Type":"ContainerDied","Data":"993ae43b50e7b97d740f878221cd96f7f638a98719e18091e0b9c899005f24ff"} Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.372544 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pmzzl" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.372555 4940 scope.go:117] "RemoveContainer" containerID="d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.392549 4940 scope.go:117] "RemoveContainer" containerID="85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.403496 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pmzzl"] Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.411241 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pmzzl"] Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.411553 4940 scope.go:117] "RemoveContainer" containerID="a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.452715 4940 scope.go:117] "RemoveContainer" containerID="d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784" Nov 26 08:32:19 crc kubenswrapper[4940]: E1126 08:32:19.453261 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784\": container with ID starting with d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784 not found: ID does not exist" containerID="d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.453324 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784"} err="failed to get container status \"d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784\": rpc error: code = NotFound desc = could not find container \"d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784\": container with ID starting with d9233a94b808433042baad86575e06eeb849963dd7ee5a29ff33ee6d10fe0784 not found: ID does not exist" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.453358 4940 scope.go:117] "RemoveContainer" containerID="85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281" Nov 26 08:32:19 crc kubenswrapper[4940]: E1126 08:32:19.453806 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281\": container with ID starting with 85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281 not found: ID does not exist" containerID="85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.453840 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281"} err="failed to get container status \"85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281\": rpc error: code = NotFound desc = could not find container \"85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281\": container with ID starting with 85a02cde781914a1bd788c1de8746d9ecb0632d1bb06fd27a6227c8089b4e281 not found: ID does not exist" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.453866 4940 scope.go:117] "RemoveContainer" 
containerID="a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b" Nov 26 08:32:19 crc kubenswrapper[4940]: E1126 08:32:19.454146 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b\": container with ID starting with a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b not found: ID does not exist" containerID="a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b" Nov 26 08:32:19 crc kubenswrapper[4940]: I1126 08:32:19.454183 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b"} err="failed to get container status \"a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b\": rpc error: code = NotFound desc = could not find container \"a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b\": container with ID starting with a63ca962a217e3e63a65f78ffb1be3497d1346cb5455fd1261bcf73bfd56598b not found: ID does not exist" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.178490 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" path="/var/lib/kubelet/pods/70586459-b4cc-46c5-a06e-44e3e075150d/volumes" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.743559 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 08:32:21 crc kubenswrapper[4940]: E1126 08:32:21.744149 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" containerName="extract-content" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.744178 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" containerName="extract-content" Nov 26 08:32:21 crc kubenswrapper[4940]: E1126 08:32:21.744202 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" containerName="extract-utilities" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.744217 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" containerName="extract-utilities" Nov 26 08:32:21 crc kubenswrapper[4940]: E1126 08:32:21.744251 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" containerName="registry-server" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.744265 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" containerName="registry-server" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.744580 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="70586459-b4cc-46c5-a06e-44e3e075150d" containerName="registry-server" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.745481 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.748222 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-krr5d" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.754133 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.773107 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n6d8\" (UniqueName: \"kubernetes.io/projected/64313888-2fa1-4bb5-94fb-10f84fed97b4-kube-api-access-8n6d8\") pod \"mariadb-client-1-default\" (UID: \"64313888-2fa1-4bb5-94fb-10f84fed97b4\") " pod="openstack/mariadb-client-1-default" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.874843 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n6d8\" (UniqueName: \"kubernetes.io/projected/64313888-2fa1-4bb5-94fb-10f84fed97b4-kube-api-access-8n6d8\") pod \"mariadb-client-1-default\" (UID: \"64313888-2fa1-4bb5-94fb-10f84fed97b4\") " pod="openstack/mariadb-client-1-default" Nov 26 08:32:21 crc kubenswrapper[4940]: I1126 08:32:21.895934 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n6d8\" (UniqueName: \"kubernetes.io/projected/64313888-2fa1-4bb5-94fb-10f84fed97b4-kube-api-access-8n6d8\") pod \"mariadb-client-1-default\" (UID: \"64313888-2fa1-4bb5-94fb-10f84fed97b4\") " pod="openstack/mariadb-client-1-default" Nov 26 08:32:22 crc kubenswrapper[4940]: I1126 08:32:22.067874 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 26 08:32:22 crc kubenswrapper[4940]: I1126 08:32:22.666541 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 08:32:23 crc kubenswrapper[4940]: I1126 08:32:23.410220 4940 generic.go:334] "Generic (PLEG): container finished" podID="64313888-2fa1-4bb5-94fb-10f84fed97b4" containerID="2de4fafcedec7453d90177185bcd3a13bb8c300e436eb7f3850e8dff1c852c81" exitCode=0 Nov 26 08:32:23 crc kubenswrapper[4940]: I1126 08:32:23.410437 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"64313888-2fa1-4bb5-94fb-10f84fed97b4","Type":"ContainerDied","Data":"2de4fafcedec7453d90177185bcd3a13bb8c300e436eb7f3850e8dff1c852c81"} Nov 26 08:32:23 crc kubenswrapper[4940]: I1126 08:32:23.410503 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"64313888-2fa1-4bb5-94fb-10f84fed97b4","Type":"ContainerStarted","Data":"52ccc59aee75e3e608d27b23f4159868762a65a4240c293d513e80f5a6fa2565"} Nov 26 08:32:24 crc kubenswrapper[4940]: I1126 08:32:24.886656 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 26 08:32:24 crc kubenswrapper[4940]: I1126 08:32:24.909808 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_64313888-2fa1-4bb5-94fb-10f84fed97b4/mariadb-client-1-default/0.log" Nov 26 08:32:24 crc kubenswrapper[4940]: I1126 08:32:24.920669 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8n6d8\" (UniqueName: \"kubernetes.io/projected/64313888-2fa1-4bb5-94fb-10f84fed97b4-kube-api-access-8n6d8\") pod \"64313888-2fa1-4bb5-94fb-10f84fed97b4\" (UID: \"64313888-2fa1-4bb5-94fb-10f84fed97b4\") " Nov 26 08:32:24 crc kubenswrapper[4940]: I1126 08:32:24.927918 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64313888-2fa1-4bb5-94fb-10f84fed97b4-kube-api-access-8n6d8" (OuterVolumeSpecName: "kube-api-access-8n6d8") pod "64313888-2fa1-4bb5-94fb-10f84fed97b4" (UID: "64313888-2fa1-4bb5-94fb-10f84fed97b4"). InnerVolumeSpecName "kube-api-access-8n6d8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:24 crc kubenswrapper[4940]: I1126 08:32:24.932979 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 08:32:24 crc kubenswrapper[4940]: I1126 08:32:24.938730 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.022696 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8n6d8\" (UniqueName: \"kubernetes.io/projected/64313888-2fa1-4bb5-94fb-10f84fed97b4-kube-api-access-8n6d8\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.175193 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64313888-2fa1-4bb5-94fb-10f84fed97b4" path="/var/lib/kubelet/pods/64313888-2fa1-4bb5-94fb-10f84fed97b4/volumes" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.328207 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 08:32:25 crc kubenswrapper[4940]: E1126 08:32:25.328746 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64313888-2fa1-4bb5-94fb-10f84fed97b4" containerName="mariadb-client-1-default" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.328777 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="64313888-2fa1-4bb5-94fb-10f84fed97b4" containerName="mariadb-client-1-default" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.328983 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="64313888-2fa1-4bb5-94fb-10f84fed97b4" containerName="mariadb-client-1-default" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.329648 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.352886 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.427482 4940 scope.go:117] "RemoveContainer" containerID="2de4fafcedec7453d90177185bcd3a13bb8c300e436eb7f3850e8dff1c852c81" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.427528 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.432830 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xblhh\" (UniqueName: \"kubernetes.io/projected/4e4c528e-7373-4a4a-89c7-3809af06115f-kube-api-access-xblhh\") pod \"mariadb-client-2-default\" (UID: \"4e4c528e-7373-4a4a-89c7-3809af06115f\") " pod="openstack/mariadb-client-2-default" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.534685 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xblhh\" (UniqueName: \"kubernetes.io/projected/4e4c528e-7373-4a4a-89c7-3809af06115f-kube-api-access-xblhh\") pod \"mariadb-client-2-default\" (UID: \"4e4c528e-7373-4a4a-89c7-3809af06115f\") " pod="openstack/mariadb-client-2-default" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.561094 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xblhh\" (UniqueName: \"kubernetes.io/projected/4e4c528e-7373-4a4a-89c7-3809af06115f-kube-api-access-xblhh\") pod \"mariadb-client-2-default\" (UID: \"4e4c528e-7373-4a4a-89c7-3809af06115f\") " pod="openstack/mariadb-client-2-default" Nov 26 08:32:25 crc kubenswrapper[4940]: I1126 08:32:25.671189 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 26 08:32:26 crc kubenswrapper[4940]: I1126 08:32:26.051202 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 08:32:26 crc kubenswrapper[4940]: W1126 08:32:26.056747 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4e4c528e_7373_4a4a_89c7_3809af06115f.slice/crio-6ce3b30d978586ccd5b98b7316228980ef863a2f9c42a77f64254eca93d4a93b WatchSource:0}: Error finding container 6ce3b30d978586ccd5b98b7316228980ef863a2f9c42a77f64254eca93d4a93b: Status 404 returned error can't find the container with id 6ce3b30d978586ccd5b98b7316228980ef863a2f9c42a77f64254eca93d4a93b Nov 26 08:32:26 crc kubenswrapper[4940]: I1126 08:32:26.441804 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"4e4c528e-7373-4a4a-89c7-3809af06115f","Type":"ContainerStarted","Data":"f62849f369b49b83accb1f24ba899b1a796b89f901dc44a3a95995e2bd43c12f"} Nov 26 08:32:26 crc kubenswrapper[4940]: I1126 08:32:26.442280 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"4e4c528e-7373-4a4a-89c7-3809af06115f","Type":"ContainerStarted","Data":"6ce3b30d978586ccd5b98b7316228980ef863a2f9c42a77f64254eca93d4a93b"} Nov 26 08:32:26 crc kubenswrapper[4940]: I1126 08:32:26.476092 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-2-default" podStartSLOduration=1.476025098 podStartE2EDuration="1.476025098s" podCreationTimestamp="2025-11-26 08:32:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:32:26.459010975 +0000 UTC m=+5847.979152634" watchObservedRunningTime="2025-11-26 08:32:26.476025098 +0000 UTC m=+5847.996166727" Nov 26 08:32:27 crc kubenswrapper[4940]: I1126 08:32:27.452970 4940 generic.go:334] "Generic (PLEG): container finished" podID="4e4c528e-7373-4a4a-89c7-3809af06115f" 
containerID="f62849f369b49b83accb1f24ba899b1a796b89f901dc44a3a95995e2bd43c12f" exitCode=1 Nov 26 08:32:27 crc kubenswrapper[4940]: I1126 08:32:27.453170 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"4e4c528e-7373-4a4a-89c7-3809af06115f","Type":"ContainerDied","Data":"f62849f369b49b83accb1f24ba899b1a796b89f901dc44a3a95995e2bd43c12f"} Nov 26 08:32:28 crc kubenswrapper[4940]: I1126 08:32:28.835861 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 26 08:32:28 crc kubenswrapper[4940]: I1126 08:32:28.875230 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 08:32:28 crc kubenswrapper[4940]: I1126 08:32:28.881880 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 08:32:28 crc kubenswrapper[4940]: I1126 08:32:28.891690 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xblhh\" (UniqueName: \"kubernetes.io/projected/4e4c528e-7373-4a4a-89c7-3809af06115f-kube-api-access-xblhh\") pod \"4e4c528e-7373-4a4a-89c7-3809af06115f\" (UID: \"4e4c528e-7373-4a4a-89c7-3809af06115f\") " Nov 26 08:32:28 crc kubenswrapper[4940]: I1126 08:32:28.899589 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e4c528e-7373-4a4a-89c7-3809af06115f-kube-api-access-xblhh" (OuterVolumeSpecName: "kube-api-access-xblhh") pod "4e4c528e-7373-4a4a-89c7-3809af06115f" (UID: "4e4c528e-7373-4a4a-89c7-3809af06115f"). InnerVolumeSpecName "kube-api-access-xblhh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:28 crc kubenswrapper[4940]: I1126 08:32:28.993501 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xblhh\" (UniqueName: \"kubernetes.io/projected/4e4c528e-7373-4a4a-89c7-3809af06115f-kube-api-access-xblhh\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.175362 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e4c528e-7373-4a4a-89c7-3809af06115f" path="/var/lib/kubelet/pods/4e4c528e-7373-4a4a-89c7-3809af06115f/volumes" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.263941 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"] Nov 26 08:32:29 crc kubenswrapper[4940]: E1126 08:32:29.264406 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4c528e-7373-4a4a-89c7-3809af06115f" containerName="mariadb-client-2-default" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.264426 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4c528e-7373-4a4a-89c7-3809af06115f" containerName="mariadb-client-2-default" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.264586 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e4c528e-7373-4a4a-89c7-3809af06115f" containerName="mariadb-client-2-default" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.265143 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.271792 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.296740 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrq4g\" (UniqueName: \"kubernetes.io/projected/d971cff6-4694-426c-beb1-6cf1741909bf-kube-api-access-nrq4g\") pod \"mariadb-client-1\" (UID: \"d971cff6-4694-426c-beb1-6cf1741909bf\") " pod="openstack/mariadb-client-1" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.398830 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrq4g\" (UniqueName: \"kubernetes.io/projected/d971cff6-4694-426c-beb1-6cf1741909bf-kube-api-access-nrq4g\") pod \"mariadb-client-1\" (UID: \"d971cff6-4694-426c-beb1-6cf1741909bf\") " pod="openstack/mariadb-client-1" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.428870 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrq4g\" (UniqueName: \"kubernetes.io/projected/d971cff6-4694-426c-beb1-6cf1741909bf-kube-api-access-nrq4g\") pod \"mariadb-client-1\" (UID: \"d971cff6-4694-426c-beb1-6cf1741909bf\") " pod="openstack/mariadb-client-1" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.472155 4940 scope.go:117] "RemoveContainer" containerID="f62849f369b49b83accb1f24ba899b1a796b89f901dc44a3a95995e2bd43c12f" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.472313 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 26 08:32:29 crc kubenswrapper[4940]: I1126 08:32:29.584977 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 26 08:32:30 crc kubenswrapper[4940]: I1126 08:32:30.100027 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 26 08:32:30 crc kubenswrapper[4940]: I1126 08:32:30.480432 4940 generic.go:334] "Generic (PLEG): container finished" podID="d971cff6-4694-426c-beb1-6cf1741909bf" containerID="a818f671c621df8b4473b4411a397aa50692b46d90ae580991e3e74efaf4d11e" exitCode=0 Nov 26 08:32:30 crc kubenswrapper[4940]: I1126 08:32:30.480473 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"d971cff6-4694-426c-beb1-6cf1741909bf","Type":"ContainerDied","Data":"a818f671c621df8b4473b4411a397aa50692b46d90ae580991e3e74efaf4d11e"} Nov 26 08:32:30 crc kubenswrapper[4940]: I1126 08:32:30.480507 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"d971cff6-4694-426c-beb1-6cf1741909bf","Type":"ContainerStarted","Data":"15b89b6dffd6e84356590ce34157e9f4cb1eadefc046d87ef67c619594791347"} Nov 26 08:32:31 crc kubenswrapper[4940]: I1126 08:32:31.855810 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 26 08:32:31 crc kubenswrapper[4940]: I1126 08:32:31.873836 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_d971cff6-4694-426c-beb1-6cf1741909bf/mariadb-client-1/0.log" Nov 26 08:32:31 crc kubenswrapper[4940]: I1126 08:32:31.901583 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"] Nov 26 08:32:31 crc kubenswrapper[4940]: I1126 08:32:31.909640 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"] Nov 26 08:32:31 crc kubenswrapper[4940]: I1126 08:32:31.934856 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrq4g\" (UniqueName: \"kubernetes.io/projected/d971cff6-4694-426c-beb1-6cf1741909bf-kube-api-access-nrq4g\") pod \"d971cff6-4694-426c-beb1-6cf1741909bf\" (UID: \"d971cff6-4694-426c-beb1-6cf1741909bf\") " Nov 26 08:32:31 crc kubenswrapper[4940]: I1126 08:32:31.939569 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d971cff6-4694-426c-beb1-6cf1741909bf-kube-api-access-nrq4g" (OuterVolumeSpecName: "kube-api-access-nrq4g") pod "d971cff6-4694-426c-beb1-6cf1741909bf" (UID: "d971cff6-4694-426c-beb1-6cf1741909bf"). InnerVolumeSpecName "kube-api-access-nrq4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.036323 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrq4g\" (UniqueName: \"kubernetes.io/projected/d971cff6-4694-426c-beb1-6cf1741909bf-kube-api-access-nrq4g\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.344727 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 08:32:32 crc kubenswrapper[4940]: E1126 08:32:32.345900 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d971cff6-4694-426c-beb1-6cf1741909bf" containerName="mariadb-client-1" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.345944 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d971cff6-4694-426c-beb1-6cf1741909bf" containerName="mariadb-client-1" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.346370 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d971cff6-4694-426c-beb1-6cf1741909bf" containerName="mariadb-client-1" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.347558 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.364621 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.443836 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkcw4\" (UniqueName: \"kubernetes.io/projected/5406b888-1506-47f0-b72b-7cfa9a4af98d-kube-api-access-pkcw4\") pod \"mariadb-client-4-default\" (UID: \"5406b888-1506-47f0-b72b-7cfa9a4af98d\") " pod="openstack/mariadb-client-4-default" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.503926 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15b89b6dffd6e84356590ce34157e9f4cb1eadefc046d87ef67c619594791347" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.503997 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.545604 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkcw4\" (UniqueName: \"kubernetes.io/projected/5406b888-1506-47f0-b72b-7cfa9a4af98d-kube-api-access-pkcw4\") pod \"mariadb-client-4-default\" (UID: \"5406b888-1506-47f0-b72b-7cfa9a4af98d\") " pod="openstack/mariadb-client-4-default" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.564323 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkcw4\" (UniqueName: \"kubernetes.io/projected/5406b888-1506-47f0-b72b-7cfa9a4af98d-kube-api-access-pkcw4\") pod \"mariadb-client-4-default\" (UID: \"5406b888-1506-47f0-b72b-7cfa9a4af98d\") " pod="openstack/mariadb-client-4-default" Nov 26 08:32:32 crc kubenswrapper[4940]: I1126 08:32:32.692698 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 26 08:32:33 crc kubenswrapper[4940]: I1126 08:32:33.174539 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d971cff6-4694-426c-beb1-6cf1741909bf" path="/var/lib/kubelet/pods/d971cff6-4694-426c-beb1-6cf1741909bf/volumes" Nov 26 08:32:33 crc kubenswrapper[4940]: I1126 08:32:33.238841 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 08:32:33 crc kubenswrapper[4940]: I1126 08:32:33.523843 4940 generic.go:334] "Generic (PLEG): container finished" podID="5406b888-1506-47f0-b72b-7cfa9a4af98d" containerID="e4c52551296615ebbc30cd1cccca219aa392faddd220df74a2db86df533da5b2" exitCode=0 Nov 26 08:32:33 crc kubenswrapper[4940]: I1126 08:32:33.523965 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"5406b888-1506-47f0-b72b-7cfa9a4af98d","Type":"ContainerDied","Data":"e4c52551296615ebbc30cd1cccca219aa392faddd220df74a2db86df533da5b2"} Nov 26 08:32:33 crc kubenswrapper[4940]: I1126 08:32:33.524232 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"5406b888-1506-47f0-b72b-7cfa9a4af98d","Type":"ContainerStarted","Data":"9698603de43323d4bd2520e63c5dae637d3ab5b889530eb5830c5cea642643d5"} Nov 26 08:32:34 crc kubenswrapper[4940]: I1126 08:32:34.955617 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 26 08:32:34 crc kubenswrapper[4940]: I1126 08:32:34.975764 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_5406b888-1506-47f0-b72b-7cfa9a4af98d/mariadb-client-4-default/0.log" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.003509 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.011991 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.080382 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkcw4\" (UniqueName: \"kubernetes.io/projected/5406b888-1506-47f0-b72b-7cfa9a4af98d-kube-api-access-pkcw4\") pod \"5406b888-1506-47f0-b72b-7cfa9a4af98d\" (UID: \"5406b888-1506-47f0-b72b-7cfa9a4af98d\") " Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.088741 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5406b888-1506-47f0-b72b-7cfa9a4af98d-kube-api-access-pkcw4" (OuterVolumeSpecName: "kube-api-access-pkcw4") pod "5406b888-1506-47f0-b72b-7cfa9a4af98d" (UID: "5406b888-1506-47f0-b72b-7cfa9a4af98d"). InnerVolumeSpecName "kube-api-access-pkcw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.181231 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5406b888-1506-47f0-b72b-7cfa9a4af98d" path="/var/lib/kubelet/pods/5406b888-1506-47f0-b72b-7cfa9a4af98d/volumes" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.185755 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-m5z4n"] Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.185940 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkcw4\" (UniqueName: \"kubernetes.io/projected/5406b888-1506-47f0-b72b-7cfa9a4af98d-kube-api-access-pkcw4\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:35 crc kubenswrapper[4940]: E1126 08:32:35.186103 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5406b888-1506-47f0-b72b-7cfa9a4af98d" containerName="mariadb-client-4-default" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.186118 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5406b888-1506-47f0-b72b-7cfa9a4af98d" containerName="mariadb-client-4-default" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.186419 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5406b888-1506-47f0-b72b-7cfa9a4af98d" containerName="mariadb-client-4-default" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.187806 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.198959 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m5z4n"] Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.388841 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-utilities\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.388914 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czbsb\" (UniqueName: \"kubernetes.io/projected/55be596d-a288-4936-bfb6-39486f5b6ecc-kube-api-access-czbsb\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.388945 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-catalog-content\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.490509 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-utilities\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.490589 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czbsb\" (UniqueName: \"kubernetes.io/projected/55be596d-a288-4936-bfb6-39486f5b6ecc-kube-api-access-czbsb\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.490626 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-catalog-content\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.491091 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-utilities\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.491105 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-catalog-content\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.507976 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-czbsb\" (UniqueName: \"kubernetes.io/projected/55be596d-a288-4936-bfb6-39486f5b6ecc-kube-api-access-czbsb\") pod \"redhat-marketplace-m5z4n\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.516216 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.551160 4940 scope.go:117] "RemoveContainer" containerID="e4c52551296615ebbc30cd1cccca219aa392faddd220df74a2db86df533da5b2" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.551232 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 26 08:32:35 crc kubenswrapper[4940]: I1126 08:32:35.955548 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m5z4n"] Nov 26 08:32:36 crc kubenswrapper[4940]: I1126 08:32:36.562286 4940 generic.go:334] "Generic (PLEG): container finished" podID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerID="45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784" exitCode=0 Nov 26 08:32:36 crc kubenswrapper[4940]: I1126 08:32:36.562368 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5z4n" event={"ID":"55be596d-a288-4936-bfb6-39486f5b6ecc","Type":"ContainerDied","Data":"45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784"} Nov 26 08:32:36 crc kubenswrapper[4940]: I1126 08:32:36.562435 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5z4n" event={"ID":"55be596d-a288-4936-bfb6-39486f5b6ecc","Type":"ContainerStarted","Data":"e82b19c385abe9fd082ec4c40a63283e944e49ba42699e01ff2839a332d0c159"} Nov 26 08:32:37 crc kubenswrapper[4940]: I1126 08:32:37.574675 4940 generic.go:334] "Generic (PLEG): container finished" podID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerID="fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8" exitCode=0 Nov 26 08:32:37 crc kubenswrapper[4940]: I1126 08:32:37.574816 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5z4n" event={"ID":"55be596d-a288-4936-bfb6-39486f5b6ecc","Type":"ContainerDied","Data":"fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8"} Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.586654 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5z4n" event={"ID":"55be596d-a288-4936-bfb6-39486f5b6ecc","Type":"ContainerStarted","Data":"2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c"} Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.611787 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-m5z4n" podStartSLOduration=2.176744343 podStartE2EDuration="3.611762983s" podCreationTimestamp="2025-11-26 08:32:35 +0000 UTC" firstStartedPulling="2025-11-26 08:32:36.566219565 +0000 UTC m=+5858.086361224" lastFinishedPulling="2025-11-26 08:32:38.001238205 +0000 UTC m=+5859.521379864" observedRunningTime="2025-11-26 08:32:38.609393058 +0000 UTC m=+5860.129534687" watchObservedRunningTime="2025-11-26 08:32:38.611762983 +0000 UTC m=+5860.131904612" Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.629828 4940 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.631334 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.633167 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-krr5d" Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.638431 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.641115 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-584cq\" (UniqueName: \"kubernetes.io/projected/ef55c75a-39fc-400e-9830-47d111ec48fa-kube-api-access-584cq\") pod \"mariadb-client-5-default\" (UID: \"ef55c75a-39fc-400e-9830-47d111ec48fa\") " pod="openstack/mariadb-client-5-default" Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.742957 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-584cq\" (UniqueName: \"kubernetes.io/projected/ef55c75a-39fc-400e-9830-47d111ec48fa-kube-api-access-584cq\") pod \"mariadb-client-5-default\" (UID: \"ef55c75a-39fc-400e-9830-47d111ec48fa\") " pod="openstack/mariadb-client-5-default" Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.771294 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-584cq\" (UniqueName: \"kubernetes.io/projected/ef55c75a-39fc-400e-9830-47d111ec48fa-kube-api-access-584cq\") pod \"mariadb-client-5-default\" (UID: \"ef55c75a-39fc-400e-9830-47d111ec48fa\") " pod="openstack/mariadb-client-5-default" Nov 26 08:32:38 crc kubenswrapper[4940]: I1126 08:32:38.959771 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 26 08:32:39 crc kubenswrapper[4940]: I1126 08:32:39.498667 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 08:32:39 crc kubenswrapper[4940]: I1126 08:32:39.596787 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"ef55c75a-39fc-400e-9830-47d111ec48fa","Type":"ContainerStarted","Data":"57b59b79b2217df10b32d54c3b93a3f113b6613b3f308302f267d09f4283e13a"} Nov 26 08:32:40 crc kubenswrapper[4940]: I1126 08:32:40.608896 4940 generic.go:334] "Generic (PLEG): container finished" podID="ef55c75a-39fc-400e-9830-47d111ec48fa" containerID="c126813ba56dd6bc14eb70fe8eaa031fae24f2b3bb849d3d2f583dc741bb47dc" exitCode=0 Nov 26 08:32:40 crc kubenswrapper[4940]: I1126 08:32:40.608954 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"ef55c75a-39fc-400e-9830-47d111ec48fa","Type":"ContainerDied","Data":"c126813ba56dd6bc14eb70fe8eaa031fae24f2b3bb849d3d2f583dc741bb47dc"} Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.036617 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.064560 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_ef55c75a-39fc-400e-9830-47d111ec48fa/mariadb-client-5-default/0.log" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.088244 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.093250 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.196503 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-584cq\" (UniqueName: \"kubernetes.io/projected/ef55c75a-39fc-400e-9830-47d111ec48fa-kube-api-access-584cq\") pod \"ef55c75a-39fc-400e-9830-47d111ec48fa\" (UID: \"ef55c75a-39fc-400e-9830-47d111ec48fa\") " Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.203990 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef55c75a-39fc-400e-9830-47d111ec48fa-kube-api-access-584cq" (OuterVolumeSpecName: "kube-api-access-584cq") pod "ef55c75a-39fc-400e-9830-47d111ec48fa" (UID: "ef55c75a-39fc-400e-9830-47d111ec48fa"). InnerVolumeSpecName "kube-api-access-584cq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.231651 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 08:32:42 crc kubenswrapper[4940]: E1126 08:32:42.232251 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55c75a-39fc-400e-9830-47d111ec48fa" containerName="mariadb-client-5-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.232274 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55c75a-39fc-400e-9830-47d111ec48fa" containerName="mariadb-client-5-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.232573 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55c75a-39fc-400e-9830-47d111ec48fa" containerName="mariadb-client-5-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.233456 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.245535 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.299251 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-584cq\" (UniqueName: \"kubernetes.io/projected/ef55c75a-39fc-400e-9830-47d111ec48fa-kube-api-access-584cq\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.400133 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7n4c\" (UniqueName: \"kubernetes.io/projected/2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0-kube-api-access-d7n4c\") pod \"mariadb-client-6-default\" (UID: \"2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0\") " pod="openstack/mariadb-client-6-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.501781 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7n4c\" (UniqueName: \"kubernetes.io/projected/2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0-kube-api-access-d7n4c\") pod \"mariadb-client-6-default\" (UID: \"2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0\") " pod="openstack/mariadb-client-6-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.526791 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7n4c\" (UniqueName: \"kubernetes.io/projected/2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0-kube-api-access-d7n4c\") pod \"mariadb-client-6-default\" (UID: \"2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0\") " pod="openstack/mariadb-client-6-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.569688 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.630198 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57b59b79b2217df10b32d54c3b93a3f113b6613b3f308302f267d09f4283e13a" Nov 26 08:32:42 crc kubenswrapper[4940]: I1126 08:32:42.630421 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 26 08:32:43 crc kubenswrapper[4940]: I1126 08:32:43.132336 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 08:32:43 crc kubenswrapper[4940]: I1126 08:32:43.173899 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef55c75a-39fc-400e-9830-47d111ec48fa" path="/var/lib/kubelet/pods/ef55c75a-39fc-400e-9830-47d111ec48fa/volumes" Nov 26 08:32:43 crc kubenswrapper[4940]: I1126 08:32:43.637131 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0","Type":"ContainerStarted","Data":"bc8f3d3311796fa4d5f66f6a0c239dc93d5504d4140eca1b02e25b5ca9a61439"} Nov 26 08:32:43 crc kubenswrapper[4940]: I1126 08:32:43.637177 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0","Type":"ContainerStarted","Data":"3028997c3d1dbf7c65bb56eb4313873bc7e7b86d22e08ea6e8d29369e6ee1dc8"} Nov 26 08:32:43 crc kubenswrapper[4940]: I1126 08:32:43.654542 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-6-default" podStartSLOduration=1.654510733 podStartE2EDuration="1.654510733s" podCreationTimestamp="2025-11-26 08:32:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:32:43.648132219 +0000 UTC m=+5865.168273838" watchObservedRunningTime="2025-11-26 08:32:43.654510733 +0000 UTC m=+5865.174652352" Nov 26 08:32:44 crc kubenswrapper[4940]: I1126 08:32:44.645929 4940 generic.go:334] "Generic (PLEG): container finished" podID="2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0" containerID="bc8f3d3311796fa4d5f66f6a0c239dc93d5504d4140eca1b02e25b5ca9a61439" exitCode=1 Nov 26 08:32:44 crc kubenswrapper[4940]: I1126 08:32:44.646035 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0","Type":"ContainerDied","Data":"bc8f3d3311796fa4d5f66f6a0c239dc93d5504d4140eca1b02e25b5ca9a61439"} Nov 26 08:32:45 crc kubenswrapper[4940]: I1126 08:32:45.517942 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:45 crc kubenswrapper[4940]: I1126 08:32:45.518067 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:45 crc kubenswrapper[4940]: I1126 08:32:45.573525 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:45 crc kubenswrapper[4940]: I1126 08:32:45.702773 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:45 crc kubenswrapper[4940]: I1126 08:32:45.822355 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m5z4n"] Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.039577 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.087094 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.095558 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.163774 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7n4c\" (UniqueName: \"kubernetes.io/projected/2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0-kube-api-access-d7n4c\") pod \"2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0\" (UID: \"2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0\") " Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.170398 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0-kube-api-access-d7n4c" (OuterVolumeSpecName: "kube-api-access-d7n4c") pod "2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0" (UID: "2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0"). InnerVolumeSpecName "kube-api-access-d7n4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.195919 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 08:32:46 crc kubenswrapper[4940]: E1126 08:32:46.196249 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0" containerName="mariadb-client-6-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.196261 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0" containerName="mariadb-client-6-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.196435 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0" containerName="mariadb-client-6-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.196904 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.211718 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.265975 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7n4c\" (UniqueName: \"kubernetes.io/projected/2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0-kube-api-access-d7n4c\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.367677 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkzqc\" (UniqueName: \"kubernetes.io/projected/cffc0c7a-0dfe-4043-9a50-9c32a9c53746-kube-api-access-mkzqc\") pod \"mariadb-client-7-default\" (UID: \"cffc0c7a-0dfe-4043-9a50-9c32a9c53746\") " pod="openstack/mariadb-client-7-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.469807 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkzqc\" (UniqueName: \"kubernetes.io/projected/cffc0c7a-0dfe-4043-9a50-9c32a9c53746-kube-api-access-mkzqc\") pod \"mariadb-client-7-default\" (UID: \"cffc0c7a-0dfe-4043-9a50-9c32a9c53746\") " pod="openstack/mariadb-client-7-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.499318 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkzqc\" (UniqueName: \"kubernetes.io/projected/cffc0c7a-0dfe-4043-9a50-9c32a9c53746-kube-api-access-mkzqc\") pod \"mariadb-client-7-default\" (UID: \"cffc0c7a-0dfe-4043-9a50-9c32a9c53746\") " pod="openstack/mariadb-client-7-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.552036 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.668657 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 26 08:32:46 crc kubenswrapper[4940]: I1126 08:32:46.671287 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3028997c3d1dbf7c65bb56eb4313873bc7e7b86d22e08ea6e8d29369e6ee1dc8" Nov 26 08:32:47 crc kubenswrapper[4940]: I1126 08:32:47.134260 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 08:32:47 crc kubenswrapper[4940]: I1126 08:32:47.184417 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0" path="/var/lib/kubelet/pods/2a04c47c-6ab6-4b9d-a90a-f524c0e15ec0/volumes" Nov 26 08:32:47 crc kubenswrapper[4940]: I1126 08:32:47.679624 4940 generic.go:334] "Generic (PLEG): container finished" podID="cffc0c7a-0dfe-4043-9a50-9c32a9c53746" containerID="0ae6479fd14731cfc27bfeafde100115abeb3dc0af725582ed56d033fc79e4b4" exitCode=0 Nov 26 08:32:47 crc kubenswrapper[4940]: I1126 08:32:47.679741 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"cffc0c7a-0dfe-4043-9a50-9c32a9c53746","Type":"ContainerDied","Data":"0ae6479fd14731cfc27bfeafde100115abeb3dc0af725582ed56d033fc79e4b4"} Nov 26 08:32:47 crc kubenswrapper[4940]: I1126 08:32:47.679998 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"cffc0c7a-0dfe-4043-9a50-9c32a9c53746","Type":"ContainerStarted","Data":"9a32a58b90ec31564526c674b19da31c965fde711d1f4ad4ee8fea6528a40772"} Nov 26 08:32:47 crc kubenswrapper[4940]: I1126 08:32:47.680326 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-m5z4n" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerName="registry-server" containerID="cri-o://2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c" gracePeriod=2 Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.130138 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.300534 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-catalog-content\") pod \"55be596d-a288-4936-bfb6-39486f5b6ecc\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.300611 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-utilities\") pod \"55be596d-a288-4936-bfb6-39486f5b6ecc\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.300631 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czbsb\" (UniqueName: \"kubernetes.io/projected/55be596d-a288-4936-bfb6-39486f5b6ecc-kube-api-access-czbsb\") pod \"55be596d-a288-4936-bfb6-39486f5b6ecc\" (UID: \"55be596d-a288-4936-bfb6-39486f5b6ecc\") " Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.301477 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-utilities" (OuterVolumeSpecName: "utilities") pod "55be596d-a288-4936-bfb6-39486f5b6ecc" (UID: "55be596d-a288-4936-bfb6-39486f5b6ecc"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.312305 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55be596d-a288-4936-bfb6-39486f5b6ecc-kube-api-access-czbsb" (OuterVolumeSpecName: "kube-api-access-czbsb") pod "55be596d-a288-4936-bfb6-39486f5b6ecc" (UID: "55be596d-a288-4936-bfb6-39486f5b6ecc"). InnerVolumeSpecName "kube-api-access-czbsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.323971 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55be596d-a288-4936-bfb6-39486f5b6ecc" (UID: "55be596d-a288-4936-bfb6-39486f5b6ecc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.402112 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.402145 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czbsb\" (UniqueName: \"kubernetes.io/projected/55be596d-a288-4936-bfb6-39486f5b6ecc-kube-api-access-czbsb\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.402155 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55be596d-a288-4936-bfb6-39486f5b6ecc-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.691241 4940 generic.go:334] "Generic (PLEG): container finished" podID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerID="2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c" exitCode=0 Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.691395 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5z4n" event={"ID":"55be596d-a288-4936-bfb6-39486f5b6ecc","Type":"ContainerDied","Data":"2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c"} Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.691512 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m5z4n" event={"ID":"55be596d-a288-4936-bfb6-39486f5b6ecc","Type":"ContainerDied","Data":"e82b19c385abe9fd082ec4c40a63283e944e49ba42699e01ff2839a332d0c159"} Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.691565 4940 scope.go:117] "RemoveContainer" containerID="2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.691737 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m5z4n" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.730200 4940 scope.go:117] "RemoveContainer" containerID="fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.760445 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m5z4n"] Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.767860 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-m5z4n"] Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.793944 4940 scope.go:117] "RemoveContainer" containerID="45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.817571 4940 scope.go:117] "RemoveContainer" containerID="2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c" Nov 26 08:32:48 crc kubenswrapper[4940]: E1126 08:32:48.818054 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c\": container with ID starting with 2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c not found: ID does not exist" containerID="2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.818085 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c"} err="failed to get container status \"2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c\": rpc error: code = NotFound desc = could not find container \"2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c\": container with ID starting with 2c6b5eb7ef6f594553247ca6653f0db10e672f70e7ce176f6a54b99dc44e397c not found: ID does not exist" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.818104 4940 scope.go:117] "RemoveContainer" containerID="fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8" Nov 26 08:32:48 crc kubenswrapper[4940]: E1126 08:32:48.818371 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8\": container with ID starting with fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8 not found: ID does not exist" containerID="fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.818414 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8"} err="failed to get container status \"fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8\": rpc error: code = NotFound desc = could not find container \"fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8\": container with ID starting with fb7ed7451d9675503c651b025bff1863dc4a7d413f267432110ea39a2bd54ac8 not found: ID does not exist" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.818447 4940 scope.go:117] "RemoveContainer" containerID="45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784" Nov 26 08:32:48 crc kubenswrapper[4940]: E1126 08:32:48.818673 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784\": container with ID starting with 45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784 not found: ID does not exist" containerID="45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784" Nov 26 08:32:48 crc kubenswrapper[4940]: I1126 08:32:48.818702 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784"} err="failed to get container status \"45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784\": rpc error: code = NotFound desc = could not find container \"45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784\": container with ID starting with 45358835cea6d24514db760b478d0f3146de37e746ad529268fb9bf1cc87d784 not found: ID does not exist" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.012316 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.036236 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_cffc0c7a-0dfe-4043-9a50-9c32a9c53746/mariadb-client-7-default/0.log" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.061959 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.076613 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.112279 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkzqc\" (UniqueName: \"kubernetes.io/projected/cffc0c7a-0dfe-4043-9a50-9c32a9c53746-kube-api-access-mkzqc\") pod \"cffc0c7a-0dfe-4043-9a50-9c32a9c53746\" (UID: \"cffc0c7a-0dfe-4043-9a50-9c32a9c53746\") " Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.116352 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cffc0c7a-0dfe-4043-9a50-9c32a9c53746-kube-api-access-mkzqc" (OuterVolumeSpecName: "kube-api-access-mkzqc") pod "cffc0c7a-0dfe-4043-9a50-9c32a9c53746" (UID: "cffc0c7a-0dfe-4043-9a50-9c32a9c53746"). InnerVolumeSpecName "kube-api-access-mkzqc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.174992 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" path="/var/lib/kubelet/pods/55be596d-a288-4936-bfb6-39486f5b6ecc/volumes" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.175729 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cffc0c7a-0dfe-4043-9a50-9c32a9c53746" path="/var/lib/kubelet/pods/cffc0c7a-0dfe-4043-9a50-9c32a9c53746/volumes" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.182253 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Nov 26 08:32:49 crc kubenswrapper[4940]: E1126 08:32:49.182520 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerName="extract-content" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.182535 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerName="extract-content" Nov 26 08:32:49 crc kubenswrapper[4940]: E1126 08:32:49.182559 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerName="registry-server" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.182566 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerName="registry-server" Nov 26 08:32:49 crc kubenswrapper[4940]: E1126 08:32:49.182584 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerName="extract-utilities" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.182590 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerName="extract-utilities" Nov 26 08:32:49 crc kubenswrapper[4940]: E1126 08:32:49.182608 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffc0c7a-0dfe-4043-9a50-9c32a9c53746" containerName="mariadb-client-7-default" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.182614 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffc0c7a-0dfe-4043-9a50-9c32a9c53746" containerName="mariadb-client-7-default" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.182746 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="55be596d-a288-4936-bfb6-39486f5b6ecc" containerName="registry-server" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.182770 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="cffc0c7a-0dfe-4043-9a50-9c32a9c53746" containerName="mariadb-client-7-default" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.183263 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.214189 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkzqc\" (UniqueName: \"kubernetes.io/projected/cffc0c7a-0dfe-4043-9a50-9c32a9c53746-kube-api-access-mkzqc\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.232228 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.315234 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prcmx\" (UniqueName: \"kubernetes.io/projected/f62836a2-b44f-4cc8-a948-639df127c4bc-kube-api-access-prcmx\") pod \"mariadb-client-2\" (UID: \"f62836a2-b44f-4cc8-a948-639df127c4bc\") " pod="openstack/mariadb-client-2" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.416932 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prcmx\" (UniqueName: \"kubernetes.io/projected/f62836a2-b44f-4cc8-a948-639df127c4bc-kube-api-access-prcmx\") pod \"mariadb-client-2\" (UID: \"f62836a2-b44f-4cc8-a948-639df127c4bc\") " pod="openstack/mariadb-client-2" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.434952 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prcmx\" (UniqueName: \"kubernetes.io/projected/f62836a2-b44f-4cc8-a948-639df127c4bc-kube-api-access-prcmx\") pod \"mariadb-client-2\" (UID: \"f62836a2-b44f-4cc8-a948-639df127c4bc\") " pod="openstack/mariadb-client-2" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.503639 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.702500 4940 scope.go:117] "RemoveContainer" containerID="0ae6479fd14731cfc27bfeafde100115abeb3dc0af725582ed56d033fc79e4b4" Nov 26 08:32:49 crc kubenswrapper[4940]: I1126 08:32:49.702590 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 26 08:32:50 crc kubenswrapper[4940]: I1126 08:32:50.017992 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 26 08:32:50 crc kubenswrapper[4940]: W1126 08:32:50.023817 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf62836a2_b44f_4cc8_a948_639df127c4bc.slice/crio-0d11cb1373c327f27e4c6b664a057f574882d84920bb4cda3247019b85fce03b WatchSource:0}: Error finding container 0d11cb1373c327f27e4c6b664a057f574882d84920bb4cda3247019b85fce03b: Status 404 returned error can't find the container with id 0d11cb1373c327f27e4c6b664a057f574882d84920bb4cda3247019b85fce03b Nov 26 08:32:50 crc kubenswrapper[4940]: I1126 08:32:50.711005 4940 generic.go:334] "Generic (PLEG): container finished" podID="f62836a2-b44f-4cc8-a948-639df127c4bc" containerID="d3a2d3ac59f248cecc0368fd1d62c25521ed6af77b2d80850b916042ce7da41f" exitCode=0 Nov 26 08:32:50 crc kubenswrapper[4940]: I1126 08:32:50.711101 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"f62836a2-b44f-4cc8-a948-639df127c4bc","Type":"ContainerDied","Data":"d3a2d3ac59f248cecc0368fd1d62c25521ed6af77b2d80850b916042ce7da41f"} Nov 26 08:32:50 crc kubenswrapper[4940]: I1126 08:32:50.711128 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"f62836a2-b44f-4cc8-a948-639df127c4bc","Type":"ContainerStarted","Data":"0d11cb1373c327f27e4c6b664a057f574882d84920bb4cda3247019b85fce03b"} Nov 26 08:32:51 crc kubenswrapper[4940]: I1126 08:32:51.728076 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:32:51 crc kubenswrapper[4940]: I1126 08:32:51.728163 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.121312 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.137890 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_f62836a2-b44f-4cc8-a948-639df127c4bc/mariadb-client-2/0.log" Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.161358 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.169105 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.260851 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prcmx\" (UniqueName: \"kubernetes.io/projected/f62836a2-b44f-4cc8-a948-639df127c4bc-kube-api-access-prcmx\") pod \"f62836a2-b44f-4cc8-a948-639df127c4bc\" (UID: \"f62836a2-b44f-4cc8-a948-639df127c4bc\") " Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.266965 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f62836a2-b44f-4cc8-a948-639df127c4bc-kube-api-access-prcmx" (OuterVolumeSpecName: "kube-api-access-prcmx") pod "f62836a2-b44f-4cc8-a948-639df127c4bc" (UID: "f62836a2-b44f-4cc8-a948-639df127c4bc"). InnerVolumeSpecName "kube-api-access-prcmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.362424 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prcmx\" (UniqueName: \"kubernetes.io/projected/f62836a2-b44f-4cc8-a948-639df127c4bc-kube-api-access-prcmx\") on node \"crc\" DevicePath \"\"" Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.730515 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d11cb1373c327f27e4c6b664a057f574882d84920bb4cda3247019b85fce03b" Nov 26 08:32:52 crc kubenswrapper[4940]: I1126 08:32:52.730575 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Nov 26 08:32:53 crc kubenswrapper[4940]: I1126 08:32:53.177773 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f62836a2-b44f-4cc8-a948-639df127c4bc" path="/var/lib/kubelet/pods/f62836a2-b44f-4cc8-a948-639df127c4bc/volumes" Nov 26 08:33:21 crc kubenswrapper[4940]: I1126 08:33:21.729017 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:33:21 crc kubenswrapper[4940]: I1126 08:33:21.729640 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:33:51 crc kubenswrapper[4940]: I1126 08:33:51.728760 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:33:51 crc kubenswrapper[4940]: I1126 08:33:51.729561 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:33:51 crc kubenswrapper[4940]: I1126 08:33:51.729650 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 08:33:51 crc kubenswrapper[4940]: I1126 08:33:51.730735 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:33:51 crc kubenswrapper[4940]: I1126 08:33:51.730854 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" gracePeriod=600 Nov 26 08:33:51 crc kubenswrapper[4940]: E1126 08:33:51.863456 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:33:52 crc kubenswrapper[4940]: I1126 08:33:52.596136 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" 
containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" exitCode=0 Nov 26 08:33:52 crc kubenswrapper[4940]: I1126 08:33:52.596188 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4"} Nov 26 08:33:52 crc kubenswrapper[4940]: I1126 08:33:52.596273 4940 scope.go:117] "RemoveContainer" containerID="cc9b88f131c13a12d01b7bae4e849661d4ac4d50a4ca0815449140f6bec70a5d" Nov 26 08:33:52 crc kubenswrapper[4940]: I1126 08:33:52.596995 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:33:52 crc kubenswrapper[4940]: E1126 08:33:52.597558 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:34:04 crc kubenswrapper[4940]: I1126 08:34:04.165410 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:34:04 crc kubenswrapper[4940]: E1126 08:34:04.166393 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:34:09 crc kubenswrapper[4940]: I1126 08:34:09.502299 4940 scope.go:117] "RemoveContainer" containerID="0f930a64618fbc77bcbf6e2a205d16cc371a04f88c8879791cb093d72e8c48f8" Nov 26 08:34:17 crc kubenswrapper[4940]: I1126 08:34:17.165694 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:34:17 crc kubenswrapper[4940]: E1126 08:34:17.166493 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:34:32 crc kubenswrapper[4940]: I1126 08:34:32.165654 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:34:32 crc kubenswrapper[4940]: E1126 08:34:32.166420 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:34:44 crc kubenswrapper[4940]: I1126 08:34:44.166289 4940 scope.go:117] 
"RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:34:44 crc kubenswrapper[4940]: E1126 08:34:44.168408 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:34:58 crc kubenswrapper[4940]: I1126 08:34:58.165596 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:34:58 crc kubenswrapper[4940]: E1126 08:34:58.166571 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.175855 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mwfpr"] Nov 26 08:35:11 crc kubenswrapper[4940]: E1126 08:35:11.176608 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f62836a2-b44f-4cc8-a948-639df127c4bc" containerName="mariadb-client-2" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.176620 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f62836a2-b44f-4cc8-a948-639df127c4bc" containerName="mariadb-client-2" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.176805 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f62836a2-b44f-4cc8-a948-639df127c4bc" containerName="mariadb-client-2" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.178007 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.182184 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mwfpr"] Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.279373 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-utilities\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.279770 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-catalog-content\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.279892 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp78r\" (UniqueName: \"kubernetes.io/projected/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-kube-api-access-mp78r\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.381867 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-catalog-content\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.381930 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp78r\" (UniqueName: \"kubernetes.io/projected/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-kube-api-access-mp78r\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.381965 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-utilities\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.382506 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-catalog-content\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.382540 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-utilities\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.417422 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mp78r\" (UniqueName: \"kubernetes.io/projected/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-kube-api-access-mp78r\") pod \"community-operators-mwfpr\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.506100 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:11 crc kubenswrapper[4940]: I1126 08:35:11.786699 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mwfpr"] Nov 26 08:35:12 crc kubenswrapper[4940]: I1126 08:35:12.331834 4940 generic.go:334] "Generic (PLEG): container finished" podID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerID="35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73" exitCode=0 Nov 26 08:35:12 crc kubenswrapper[4940]: I1126 08:35:12.331921 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwfpr" event={"ID":"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb","Type":"ContainerDied","Data":"35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73"} Nov 26 08:35:12 crc kubenswrapper[4940]: I1126 08:35:12.332362 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwfpr" event={"ID":"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb","Type":"ContainerStarted","Data":"556bd4ea98b8dff7fc9e4733508cbe1afcd221aed6a287baef3c633d2dc75c57"} Nov 26 08:35:12 crc kubenswrapper[4940]: I1126 08:35:12.335977 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:35:13 crc kubenswrapper[4940]: I1126 08:35:13.165416 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:35:13 crc kubenswrapper[4940]: E1126 08:35:13.165929 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:35:13 crc kubenswrapper[4940]: I1126 08:35:13.343711 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwfpr" event={"ID":"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb","Type":"ContainerStarted","Data":"4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3"} Nov 26 08:35:14 crc kubenswrapper[4940]: I1126 08:35:14.357628 4940 generic.go:334] "Generic (PLEG): container finished" podID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerID="4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3" exitCode=0 Nov 26 08:35:14 crc kubenswrapper[4940]: I1126 08:35:14.357678 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwfpr" event={"ID":"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb","Type":"ContainerDied","Data":"4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3"} Nov 26 08:35:15 crc kubenswrapper[4940]: I1126 08:35:15.375535 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwfpr" 
event={"ID":"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb","Type":"ContainerStarted","Data":"d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51"} Nov 26 08:35:15 crc kubenswrapper[4940]: I1126 08:35:15.403731 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mwfpr" podStartSLOduration=1.984794339 podStartE2EDuration="4.403704861s" podCreationTimestamp="2025-11-26 08:35:11 +0000 UTC" firstStartedPulling="2025-11-26 08:35:12.335714069 +0000 UTC m=+6013.855855688" lastFinishedPulling="2025-11-26 08:35:14.754624581 +0000 UTC m=+6016.274766210" observedRunningTime="2025-11-26 08:35:15.394501286 +0000 UTC m=+6016.914642905" watchObservedRunningTime="2025-11-26 08:35:15.403704861 +0000 UTC m=+6016.923846520" Nov 26 08:35:21 crc kubenswrapper[4940]: I1126 08:35:21.506251 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:21 crc kubenswrapper[4940]: I1126 08:35:21.507160 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:21 crc kubenswrapper[4940]: I1126 08:35:21.586377 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:22 crc kubenswrapper[4940]: I1126 08:35:22.502459 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:22 crc kubenswrapper[4940]: I1126 08:35:22.551690 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mwfpr"] Nov 26 08:35:24 crc kubenswrapper[4940]: I1126 08:35:24.470012 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mwfpr" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerName="registry-server" containerID="cri-o://d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51" gracePeriod=2 Nov 26 08:35:24 crc kubenswrapper[4940]: I1126 08:35:24.956517 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.111821 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-catalog-content\") pod \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.111986 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mp78r\" (UniqueName: \"kubernetes.io/projected/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-kube-api-access-mp78r\") pod \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.112213 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-utilities\") pod \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\" (UID: \"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb\") " Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.113937 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-utilities" (OuterVolumeSpecName: "utilities") pod "ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" (UID: "ae1bcfc8-ec31-4dc6-8836-c620c450e9bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.118253 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-kube-api-access-mp78r" (OuterVolumeSpecName: "kube-api-access-mp78r") pod "ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" (UID: "ae1bcfc8-ec31-4dc6-8836-c620c450e9bb"). InnerVolumeSpecName "kube-api-access-mp78r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.179432 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" (UID: "ae1bcfc8-ec31-4dc6-8836-c620c450e9bb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.215169 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.215204 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mp78r\" (UniqueName: \"kubernetes.io/projected/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-kube-api-access-mp78r\") on node \"crc\" DevicePath \"\"" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.215220 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.483501 4940 generic.go:334] "Generic (PLEG): container finished" podID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerID="d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51" exitCode=0 Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.483562 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwfpr" event={"ID":"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb","Type":"ContainerDied","Data":"d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51"} Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.483599 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwfpr" event={"ID":"ae1bcfc8-ec31-4dc6-8836-c620c450e9bb","Type":"ContainerDied","Data":"556bd4ea98b8dff7fc9e4733508cbe1afcd221aed6a287baef3c633d2dc75c57"} Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.483628 4940 scope.go:117] "RemoveContainer" containerID="d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.483796 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mwfpr" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.525403 4940 scope.go:117] "RemoveContainer" containerID="4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.529819 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mwfpr"] Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.537819 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mwfpr"] Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.547978 4940 scope.go:117] "RemoveContainer" containerID="35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.573140 4940 scope.go:117] "RemoveContainer" containerID="d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51" Nov 26 08:35:25 crc kubenswrapper[4940]: E1126 08:35:25.573536 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51\": container with ID starting with d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51 not found: ID does not exist" containerID="d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.573577 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51"} err="failed to get container status \"d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51\": rpc error: code = NotFound desc = could not find container \"d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51\": container with ID starting with d9f91466a8cd482e00c5069435fe3007242881c2834756337137a718c8665d51 not found: ID does not exist" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.573604 4940 scope.go:117] "RemoveContainer" containerID="4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3" Nov 26 08:35:25 crc kubenswrapper[4940]: E1126 08:35:25.574187 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3\": container with ID starting with 4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3 not found: ID does not exist" containerID="4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.574238 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3"} err="failed to get container status \"4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3\": rpc error: code = NotFound desc = could not find container \"4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3\": container with ID starting with 4db988784208ef54e10ee9dd76fb7c41043db97453f85f18cd32525a07c3b0a3 not found: ID does not exist" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.574272 4940 scope.go:117] "RemoveContainer" containerID="35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73" Nov 26 08:35:25 crc kubenswrapper[4940]: E1126 08:35:25.574596 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73\": container with ID starting with 35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73 not found: ID does not exist" containerID="35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73" Nov 26 08:35:25 crc kubenswrapper[4940]: I1126 08:35:25.574636 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73"} err="failed to get container status \"35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73\": rpc error: code = NotFound desc = could not find container \"35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73\": container with ID starting with 35023e143994bccc739a8d900b54efb081afd3e80cfa3926ffb8f05faab3fc73 not found: ID does not exist" Nov 26 08:35:26 crc kubenswrapper[4940]: I1126 08:35:26.165528 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:35:26 crc kubenswrapper[4940]: E1126 08:35:26.166203 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:35:27 crc kubenswrapper[4940]: I1126 08:35:27.184558 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" path="/var/lib/kubelet/pods/ae1bcfc8-ec31-4dc6-8836-c620c450e9bb/volumes" Nov 26 08:35:41 crc kubenswrapper[4940]: I1126 08:35:41.166186 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:35:41 crc kubenswrapper[4940]: E1126 08:35:41.167186 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:35:56 crc kubenswrapper[4940]: I1126 08:35:56.166105 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:35:56 crc kubenswrapper[4940]: E1126 08:35:56.168189 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:36:08 crc kubenswrapper[4940]: I1126 08:36:08.166397 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:36:08 crc kubenswrapper[4940]: E1126 08:36:08.167982 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:36:22 crc kubenswrapper[4940]: I1126 08:36:22.165568 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:36:22 crc kubenswrapper[4940]: E1126 08:36:22.166315 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:36:37 crc kubenswrapper[4940]: I1126 08:36:37.165410 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:36:37 crc kubenswrapper[4940]: E1126 08:36:37.166441 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:36:49 crc kubenswrapper[4940]: I1126 08:36:49.169241 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:36:49 crc kubenswrapper[4940]: E1126 08:36:49.171404 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:37:04 crc kubenswrapper[4940]: I1126 08:37:04.165360 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:37:04 crc kubenswrapper[4940]: E1126 08:37:04.167249 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:37:19 crc kubenswrapper[4940]: I1126 08:37:19.183157 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:37:19 crc kubenswrapper[4940]: E1126 08:37:19.184141 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:37:33 crc kubenswrapper[4940]: I1126 08:37:33.165810 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:37:33 crc kubenswrapper[4940]: E1126 08:37:33.166671 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:37:45 crc kubenswrapper[4940]: I1126 08:37:45.165491 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:37:45 crc kubenswrapper[4940]: E1126 08:37:45.166246 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:37:59 crc kubenswrapper[4940]: I1126 08:37:59.169748 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:37:59 crc kubenswrapper[4940]: E1126 08:37:59.170338 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:38:10 crc kubenswrapper[4940]: I1126 08:38:10.166626 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:38:10 crc kubenswrapper[4940]: E1126 08:38:10.167673 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:38:25 crc kubenswrapper[4940]: I1126 08:38:25.167293 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:38:25 crc kubenswrapper[4940]: E1126 08:38:25.170505 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:38:39 crc kubenswrapper[4940]: I1126 08:38:39.170245 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:38:39 crc kubenswrapper[4940]: E1126 08:38:39.171114 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:38:54 crc kubenswrapper[4940]: I1126 08:38:54.166519 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4" Nov 26 08:38:54 crc kubenswrapper[4940]: I1126 08:38:54.429438 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"ccc069e03298539b4573c9b4c189488e5f91f473e2a3e105b8467faf049684f1"} Nov 26 08:39:09 crc kubenswrapper[4940]: I1126 08:39:09.656549 4940 scope.go:117] "RemoveContainer" containerID="a818f671c621df8b4473b4411a397aa50692b46d90ae580991e3e74efaf4d11e" Nov 26 08:39:09 crc kubenswrapper[4940]: I1126 08:39:09.699407 4940 scope.go:117] "RemoveContainer" containerID="d3a2d3ac59f248cecc0368fd1d62c25521ed6af77b2d80850b916042ce7da41f" Nov 26 08:39:09 crc kubenswrapper[4940]: I1126 08:39:09.731523 4940 scope.go:117] "RemoveContainer" containerID="c126813ba56dd6bc14eb70fe8eaa031fae24f2b3bb849d3d2f583dc741bb47dc" Nov 26 08:39:09 crc kubenswrapper[4940]: I1126 08:39:09.774566 4940 scope.go:117] "RemoveContainer" containerID="bc8f3d3311796fa4d5f66f6a0c239dc93d5504d4140eca1b02e25b5ca9a61439" Nov 26 08:39:11 crc kubenswrapper[4940]: I1126 08:39:11.957660 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j64ww"] Nov 26 08:39:11 crc kubenswrapper[4940]: E1126 08:39:11.958354 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerName="extract-content" Nov 26 08:39:11 crc kubenswrapper[4940]: I1126 08:39:11.958371 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerName="extract-content" Nov 26 08:39:11 crc kubenswrapper[4940]: E1126 08:39:11.958407 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerName="extract-utilities" Nov 26 08:39:11 crc kubenswrapper[4940]: I1126 08:39:11.958416 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerName="extract-utilities" Nov 26 08:39:11 crc kubenswrapper[4940]: E1126 08:39:11.958450 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerName="registry-server" Nov 26 08:39:11 crc kubenswrapper[4940]: I1126 08:39:11.958459 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerName="registry-server" Nov 26 08:39:11 crc kubenswrapper[4940]: I1126 08:39:11.958648 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae1bcfc8-ec31-4dc6-8836-c620c450e9bb" containerName="registry-server" Nov 26 08:39:11 crc 
kubenswrapper[4940]: I1126 08:39:11.960006 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:11 crc kubenswrapper[4940]: I1126 08:39:11.977815 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j64ww"] Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.090076 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x2vs\" (UniqueName: \"kubernetes.io/projected/141da981-7987-464c-8041-c7e68d7b5a85-kube-api-access-6x2vs\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.090177 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-utilities\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.090272 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-catalog-content\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.191031 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x2vs\" (UniqueName: \"kubernetes.io/projected/141da981-7987-464c-8041-c7e68d7b5a85-kube-api-access-6x2vs\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.191114 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-utilities\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.191183 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-catalog-content\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.191652 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-catalog-content\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.191684 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-utilities\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 
08:39:12.214203 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x2vs\" (UniqueName: \"kubernetes.io/projected/141da981-7987-464c-8041-c7e68d7b5a85-kube-api-access-6x2vs\") pod \"redhat-operators-j64ww\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") " pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.280305 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j64ww" Nov 26 08:39:12 crc kubenswrapper[4940]: I1126 08:39:12.739134 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j64ww"] Nov 26 08:39:12 crc kubenswrapper[4940]: W1126 08:39:12.743125 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod141da981_7987_464c_8041_c7e68d7b5a85.slice/crio-7109fbe16527c4c753ca95f9b1eebb2c976812bc357703bcc420c2067e66587e WatchSource:0}: Error finding container 7109fbe16527c4c753ca95f9b1eebb2c976812bc357703bcc420c2067e66587e: Status 404 returned error can't find the container with id 7109fbe16527c4c753ca95f9b1eebb2c976812bc357703bcc420c2067e66587e Nov 26 08:39:13 crc kubenswrapper[4940]: I1126 08:39:13.579884 4940 generic.go:334] "Generic (PLEG): container finished" podID="141da981-7987-464c-8041-c7e68d7b5a85" containerID="385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1" exitCode=0 Nov 26 08:39:13 crc kubenswrapper[4940]: I1126 08:39:13.579938 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j64ww" event={"ID":"141da981-7987-464c-8041-c7e68d7b5a85","Type":"ContainerDied","Data":"385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1"} Nov 26 08:39:13 crc kubenswrapper[4940]: I1126 08:39:13.580304 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j64ww" event={"ID":"141da981-7987-464c-8041-c7e68d7b5a85","Type":"ContainerStarted","Data":"7109fbe16527c4c753ca95f9b1eebb2c976812bc357703bcc420c2067e66587e"} Nov 26 08:39:14 crc kubenswrapper[4940]: I1126 08:39:14.593263 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j64ww" event={"ID":"141da981-7987-464c-8041-c7e68d7b5a85","Type":"ContainerStarted","Data":"64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35"} Nov 26 08:39:15 crc kubenswrapper[4940]: I1126 08:39:15.601995 4940 generic.go:334] "Generic (PLEG): container finished" podID="141da981-7987-464c-8041-c7e68d7b5a85" containerID="64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35" exitCode=0 Nov 26 08:39:15 crc kubenswrapper[4940]: I1126 08:39:15.602065 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j64ww" event={"ID":"141da981-7987-464c-8041-c7e68d7b5a85","Type":"ContainerDied","Data":"64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35"} Nov 26 08:39:16 crc kubenswrapper[4940]: I1126 08:39:16.632809 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j64ww" event={"ID":"141da981-7987-464c-8041-c7e68d7b5a85","Type":"ContainerStarted","Data":"0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92"} Nov 26 08:39:16 crc kubenswrapper[4940]: I1126 08:39:16.655679 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j64ww" 
podStartSLOduration=2.981041319 podStartE2EDuration="5.655662598s" podCreationTimestamp="2025-11-26 08:39:11 +0000 UTC" firstStartedPulling="2025-11-26 08:39:13.582447383 +0000 UTC m=+6255.102589002" lastFinishedPulling="2025-11-26 08:39:16.257068662 +0000 UTC m=+6257.777210281" observedRunningTime="2025-11-26 08:39:16.652171367 +0000 UTC m=+6258.172312986" watchObservedRunningTime="2025-11-26 08:39:16.655662598 +0000 UTC m=+6258.175804207"
Nov 26 08:39:22 crc kubenswrapper[4940]: I1126 08:39:22.281317 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j64ww"
Nov 26 08:39:22 crc kubenswrapper[4940]: I1126 08:39:22.281889 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j64ww"
Nov 26 08:39:22 crc kubenswrapper[4940]: I1126 08:39:22.342205 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j64ww"
Nov 26 08:39:22 crc kubenswrapper[4940]: I1126 08:39:22.727755 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j64ww"
Nov 26 08:39:22 crc kubenswrapper[4940]: I1126 08:39:22.772392 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j64ww"]
Nov 26 08:39:24 crc kubenswrapper[4940]: I1126 08:39:24.695700 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j64ww" podUID="141da981-7987-464c-8041-c7e68d7b5a85" containerName="registry-server" containerID="cri-o://0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92" gracePeriod=2
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.173053 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j64ww"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.296140 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-utilities\") pod \"141da981-7987-464c-8041-c7e68d7b5a85\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") "
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.296192 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-catalog-content\") pod \"141da981-7987-464c-8041-c7e68d7b5a85\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") "
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.296233 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6x2vs\" (UniqueName: \"kubernetes.io/projected/141da981-7987-464c-8041-c7e68d7b5a85-kube-api-access-6x2vs\") pod \"141da981-7987-464c-8041-c7e68d7b5a85\" (UID: \"141da981-7987-464c-8041-c7e68d7b5a85\") "
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.298198 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-utilities" (OuterVolumeSpecName: "utilities") pod "141da981-7987-464c-8041-c7e68d7b5a85" (UID: "141da981-7987-464c-8041-c7e68d7b5a85"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.306239 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/141da981-7987-464c-8041-c7e68d7b5a85-kube-api-access-6x2vs" (OuterVolumeSpecName: "kube-api-access-6x2vs") pod "141da981-7987-464c-8041-c7e68d7b5a85" (UID: "141da981-7987-464c-8041-c7e68d7b5a85"). InnerVolumeSpecName "kube-api-access-6x2vs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.398350 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6x2vs\" (UniqueName: \"kubernetes.io/projected/141da981-7987-464c-8041-c7e68d7b5a85-kube-api-access-6x2vs\") on node \"crc\" DevicePath \"\""
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.398381 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.707919 4940 generic.go:334] "Generic (PLEG): container finished" podID="141da981-7987-464c-8041-c7e68d7b5a85" containerID="0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92" exitCode=0
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.707978 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j64ww" event={"ID":"141da981-7987-464c-8041-c7e68d7b5a85","Type":"ContainerDied","Data":"0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92"}
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.708004 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j64ww" event={"ID":"141da981-7987-464c-8041-c7e68d7b5a85","Type":"ContainerDied","Data":"7109fbe16527c4c753ca95f9b1eebb2c976812bc357703bcc420c2067e66587e"}
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.708023 4940 scope.go:117] "RemoveContainer" containerID="0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.708073 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j64ww"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.743875 4940 scope.go:117] "RemoveContainer" containerID="64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.768112 4940 scope.go:117] "RemoveContainer" containerID="385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.829510 4940 scope.go:117] "RemoveContainer" containerID="0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92"
Nov 26 08:39:25 crc kubenswrapper[4940]: E1126 08:39:25.830787 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92\": container with ID starting with 0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92 not found: ID does not exist" containerID="0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.830857 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92"} err="failed to get container status \"0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92\": rpc error: code = NotFound desc = could not find container \"0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92\": container with ID starting with 0ca16b5876abf48ecdff53494a0564c7e8fba96688ad5411dc87caa93cbf2e92 not found: ID does not exist"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.830913 4940 scope.go:117] "RemoveContainer" containerID="64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35"
Nov 26 08:39:25 crc kubenswrapper[4940]: E1126 08:39:25.831649 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35\": container with ID starting with 64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35 not found: ID does not exist" containerID="64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.831711 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35"} err="failed to get container status \"64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35\": rpc error: code = NotFound desc = could not find container \"64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35\": container with ID starting with 64060e0f8667b6cc2d1b0cd7f6b4ab3134cb4f6adaf259f52d45738e3f083a35 not found: ID does not exist"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.831751 4940 scope.go:117] "RemoveContainer" containerID="385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1"
Nov 26 08:39:25 crc kubenswrapper[4940]: E1126 08:39:25.832822 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1\": container with ID starting with 385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1 not found: ID does not exist" containerID="385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1"
Nov 26 08:39:25 crc kubenswrapper[4940]: I1126 08:39:25.832896 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1"} err="failed to get container status \"385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1\": rpc error: code = NotFound desc = could not find container \"385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1\": container with ID starting with 385c67ed8f851a4ee6758f56c1b2164b5d2f18673ff4e312db78edcfcc24adb1 not found: ID does not exist"
Nov 26 08:39:26 crc kubenswrapper[4940]: I1126 08:39:26.234392 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "141da981-7987-464c-8041-c7e68d7b5a85" (UID: "141da981-7987-464c-8041-c7e68d7b5a85"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:39:26 crc kubenswrapper[4940]: I1126 08:39:26.314479 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/141da981-7987-464c-8041-c7e68d7b5a85-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:39:26 crc kubenswrapper[4940]: I1126 08:39:26.356720 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j64ww"]
Nov 26 08:39:26 crc kubenswrapper[4940]: I1126 08:39:26.361638 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j64ww"]
Nov 26 08:39:27 crc kubenswrapper[4940]: I1126 08:39:27.183920 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="141da981-7987-464c-8041-c7e68d7b5a85" path="/var/lib/kubelet/pods/141da981-7987-464c-8041-c7e68d7b5a85/volumes"
Nov 26 08:41:21 crc kubenswrapper[4940]: I1126 08:41:21.727963 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:41:21 crc kubenswrapper[4940]: I1126 08:41:21.728515 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:41:51 crc kubenswrapper[4940]: I1126 08:41:51.728981 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:41:51 crc kubenswrapper[4940]: I1126 08:41:51.729668 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:42:21 crc kubenswrapper[4940]: I1126 08:42:21.729097 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:42:21 crc kubenswrapper[4940]: I1126 08:42:21.729654 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:42:21 crc kubenswrapper[4940]: I1126 08:42:21.729706 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 08:42:21 crc kubenswrapper[4940]: I1126 08:42:21.730443 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ccc069e03298539b4573c9b4c189488e5f91f473e2a3e105b8467faf049684f1"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 08:42:21 crc kubenswrapper[4940]: I1126 08:42:21.730501 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://ccc069e03298539b4573c9b4c189488e5f91f473e2a3e105b8467faf049684f1" gracePeriod=600
Nov 26 08:42:22 crc kubenswrapper[4940]: I1126 08:42:22.287468 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="ccc069e03298539b4573c9b4c189488e5f91f473e2a3e105b8467faf049684f1" exitCode=0
Nov 26 08:42:22 crc kubenswrapper[4940]: I1126 08:42:22.287556 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"ccc069e03298539b4573c9b4c189488e5f91f473e2a3e105b8467faf049684f1"}
Nov 26 08:42:22 crc kubenswrapper[4940]: I1126 08:42:22.287813 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"}
Nov 26 08:42:22 crc kubenswrapper[4940]: I1126 08:42:22.287835 4940 scope.go:117] "RemoveContainer" containerID="56c5b095cea68cc8e5f0a1ea700ce1c238b12933e0749cc1445e5dac2156ebc4"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.176784 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"]
Nov 26 08:42:52 crc kubenswrapper[4940]: E1126 08:42:52.178780 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141da981-7987-464c-8041-c7e68d7b5a85" containerName="extract-utilities"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.178827 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="141da981-7987-464c-8041-c7e68d7b5a85" containerName="extract-utilities"
Nov 26 08:42:52 crc kubenswrapper[4940]: E1126 08:42:52.178860 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141da981-7987-464c-8041-c7e68d7b5a85" containerName="registry-server"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.178873 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="141da981-7987-464c-8041-c7e68d7b5a85" containerName="registry-server"
Nov 26 08:42:52 crc kubenswrapper[4940]: E1126 08:42:52.178897 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141da981-7987-464c-8041-c7e68d7b5a85" containerName="extract-content"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.178910 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="141da981-7987-464c-8041-c7e68d7b5a85" containerName="extract-content"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.179371 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="141da981-7987-464c-8041-c7e68d7b5a85" containerName="registry-server"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.180261 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.183068 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-krr5d"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.196111 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.279138 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\") pod \"mariadb-copy-data\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") " pod="openstack/mariadb-copy-data"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.279541 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7fdw\" (UniqueName: \"kubernetes.io/projected/109dbcfc-960f-4aab-a9ad-fa756001dca4-kube-api-access-q7fdw\") pod \"mariadb-copy-data\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") " pod="openstack/mariadb-copy-data"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.380992 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7fdw\" (UniqueName: \"kubernetes.io/projected/109dbcfc-960f-4aab-a9ad-fa756001dca4-kube-api-access-q7fdw\") pod \"mariadb-copy-data\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") " pod="openstack/mariadb-copy-data"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.381089 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\") pod \"mariadb-copy-data\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") " pod="openstack/mariadb-copy-data"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.385673 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.385735 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\") pod \"mariadb-copy-data\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6755d794731629307cd227e45abe6b4cb09ccf5bc259b623e8356f84e7ce6300/globalmount\"" pod="openstack/mariadb-copy-data"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.405278 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7fdw\" (UniqueName: \"kubernetes.io/projected/109dbcfc-960f-4aab-a9ad-fa756001dca4-kube-api-access-q7fdw\") pod \"mariadb-copy-data\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") " pod="openstack/mariadb-copy-data"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.424484 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\") pod \"mariadb-copy-data\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") " pod="openstack/mariadb-copy-data"
Nov 26 08:42:52 crc kubenswrapper[4940]: I1126 08:42:52.506843 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.021450 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.486514 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4fd8n"]
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.488924 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.510106 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4fd8n"]
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.598923 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"109dbcfc-960f-4aab-a9ad-fa756001dca4","Type":"ContainerStarted","Data":"99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055"}
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.599184 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"109dbcfc-960f-4aab-a9ad-fa756001dca4","Type":"ContainerStarted","Data":"e39ccfbf2f849d443a0b68f6d02b3fc163ba5728d521bcb9e02dc7160eb77bb4"}
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.599725 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-catalog-content\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.599778 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-utilities\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.599799 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57ckk\" (UniqueName: \"kubernetes.io/projected/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-kube-api-access-57ckk\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.615032 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=2.615015669 podStartE2EDuration="2.615015669s" podCreationTimestamp="2025-11-26 08:42:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:42:53.611803156 +0000 UTC m=+6475.131944785" watchObservedRunningTime="2025-11-26 08:42:53.615015669 +0000 UTC m=+6475.135157298"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.701201 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-catalog-content\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.701283 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-utilities\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.701327 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57ckk\" (UniqueName: \"kubernetes.io/projected/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-kube-api-access-57ckk\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.701827 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-catalog-content\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.701971 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-utilities\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.721116 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57ckk\" (UniqueName: \"kubernetes.io/projected/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-kube-api-access-57ckk\") pod \"certified-operators-4fd8n\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") " pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:53 crc kubenswrapper[4940]: I1126 08:42:53.816528 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:42:54 crc kubenswrapper[4940]: I1126 08:42:54.175083 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4fd8n"]
Nov 26 08:42:54 crc kubenswrapper[4940]: I1126 08:42:54.607302 4940 generic.go:334] "Generic (PLEG): container finished" podID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerID="78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf" exitCode=0
Nov 26 08:42:54 crc kubenswrapper[4940]: I1126 08:42:54.607338 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fd8n" event={"ID":"01c542e9-9925-4065-ba4c-bbe6c3d85bc0","Type":"ContainerDied","Data":"78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf"}
Nov 26 08:42:54 crc kubenswrapper[4940]: I1126 08:42:54.607698 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fd8n" event={"ID":"01c542e9-9925-4065-ba4c-bbe6c3d85bc0","Type":"ContainerStarted","Data":"5ba3119fc6a66da16ec84441036acae84e1a9a07959138b0817e0f1bf43e3c12"}
Nov 26 08:42:54 crc kubenswrapper[4940]: I1126 08:42:54.609315 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 08:42:55 crc kubenswrapper[4940]: I1126 08:42:55.615136 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fd8n" event={"ID":"01c542e9-9925-4065-ba4c-bbe6c3d85bc0","Type":"ContainerStarted","Data":"46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce"}
Nov 26 08:42:56 crc kubenswrapper[4940]: I1126 08:42:56.628787 4940 generic.go:334] "Generic (PLEG): container finished" podID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerID="46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce" exitCode=0
Nov 26 08:42:56 crc kubenswrapper[4940]: I1126 08:42:56.628994 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fd8n" event={"ID":"01c542e9-9925-4065-ba4c-bbe6c3d85bc0","Type":"ContainerDied","Data":"46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce"}
Nov 26 08:42:56 crc kubenswrapper[4940]: I1126 08:42:56.851404 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:42:56 crc kubenswrapper[4940]: I1126 08:42:56.852639 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 26 08:42:56 crc kubenswrapper[4940]: I1126 08:42:56.869399 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:42:56 crc kubenswrapper[4940]: I1126 08:42:56.964178 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6shs\" (UniqueName: \"kubernetes.io/projected/8934acbf-8898-41c0-a562-45a5838db397-kube-api-access-t6shs\") pod \"mariadb-client\" (UID: \"8934acbf-8898-41c0-a562-45a5838db397\") " pod="openstack/mariadb-client"
Nov 26 08:42:57 crc kubenswrapper[4940]: I1126 08:42:57.065661 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6shs\" (UniqueName: \"kubernetes.io/projected/8934acbf-8898-41c0-a562-45a5838db397-kube-api-access-t6shs\") pod \"mariadb-client\" (UID: \"8934acbf-8898-41c0-a562-45a5838db397\") " pod="openstack/mariadb-client"
Nov 26 08:42:57 crc kubenswrapper[4940]: I1126 08:42:57.102695 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6shs\" (UniqueName: \"kubernetes.io/projected/8934acbf-8898-41c0-a562-45a5838db397-kube-api-access-t6shs\") pod \"mariadb-client\" (UID: \"8934acbf-8898-41c0-a562-45a5838db397\") " pod="openstack/mariadb-client"
Nov 26 08:42:57 crc kubenswrapper[4940]: I1126 08:42:57.181637 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 26 08:42:57 crc kubenswrapper[4940]: I1126 08:42:57.479698 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:42:57 crc kubenswrapper[4940]: W1126 08:42:57.488951 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8934acbf_8898_41c0_a562_45a5838db397.slice/crio-42a88ad02747d08aa89e33603c7c1f9232b24e1b16f10ecc76e2213d0ac52ad4 WatchSource:0}: Error finding container 42a88ad02747d08aa89e33603c7c1f9232b24e1b16f10ecc76e2213d0ac52ad4: Status 404 returned error can't find the container with id 42a88ad02747d08aa89e33603c7c1f9232b24e1b16f10ecc76e2213d0ac52ad4
Nov 26 08:42:57 crc kubenswrapper[4940]: I1126 08:42:57.638066 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"8934acbf-8898-41c0-a562-45a5838db397","Type":"ContainerStarted","Data":"42a88ad02747d08aa89e33603c7c1f9232b24e1b16f10ecc76e2213d0ac52ad4"}
Nov 26 08:42:57 crc kubenswrapper[4940]: I1126 08:42:57.640336 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fd8n" event={"ID":"01c542e9-9925-4065-ba4c-bbe6c3d85bc0","Type":"ContainerStarted","Data":"e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257"}
Nov 26 08:42:57 crc kubenswrapper[4940]: I1126 08:42:57.664017 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4fd8n" podStartSLOduration=2.164132107 podStartE2EDuration="4.663992387s" podCreationTimestamp="2025-11-26 08:42:53 +0000 UTC" firstStartedPulling="2025-11-26 08:42:54.609117597 +0000 UTC m=+6476.129259216" lastFinishedPulling="2025-11-26 08:42:57.108977877 +0000 UTC m=+6478.629119496" observedRunningTime="2025-11-26 08:42:57.663646956 +0000 UTC m=+6479.183788595" watchObservedRunningTime="2025-11-26 08:42:57.663992387 +0000 UTC m=+6479.184134006"
Nov 26 08:42:58 crc kubenswrapper[4940]: I1126 08:42:58.650312 4940 generic.go:334] "Generic (PLEG): container finished" podID="8934acbf-8898-41c0-a562-45a5838db397" containerID="743ca3bc7c8f8be1b5d0ef7a3636b46db6d36da48ee60a596ad44b094fdb7714" exitCode=0
Nov 26 08:42:58 crc kubenswrapper[4940]: I1126 08:42:58.650443 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"8934acbf-8898-41c0-a562-45a5838db397","Type":"ContainerDied","Data":"743ca3bc7c8f8be1b5d0ef7a3636b46db6d36da48ee60a596ad44b094fdb7714"}
Nov 26 08:42:59 crc kubenswrapper[4940]: I1126 08:42:59.966809 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 26 08:42:59 crc kubenswrapper[4940]: I1126 08:42:59.991451 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_8934acbf-8898-41c0-a562-45a5838db397/mariadb-client/0.log"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.015894 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.024543 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.120342 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6shs\" (UniqueName: \"kubernetes.io/projected/8934acbf-8898-41c0-a562-45a5838db397-kube-api-access-t6shs\") pod \"8934acbf-8898-41c0-a562-45a5838db397\" (UID: \"8934acbf-8898-41c0-a562-45a5838db397\") "
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.128081 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8934acbf-8898-41c0-a562-45a5838db397-kube-api-access-t6shs" (OuterVolumeSpecName: "kube-api-access-t6shs") pod "8934acbf-8898-41c0-a562-45a5838db397" (UID: "8934acbf-8898-41c0-a562-45a5838db397"). InnerVolumeSpecName "kube-api-access-t6shs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.140032 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:43:00 crc kubenswrapper[4940]: E1126 08:43:00.140412 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8934acbf-8898-41c0-a562-45a5838db397" containerName="mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.140433 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8934acbf-8898-41c0-a562-45a5838db397" containerName="mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.140657 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8934acbf-8898-41c0-a562-45a5838db397" containerName="mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.141324 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.147953 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.222168 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2ssp\" (UniqueName: \"kubernetes.io/projected/f68a3ec7-1bab-402f-aec2-1e86ac6032a0-kube-api-access-z2ssp\") pod \"mariadb-client\" (UID: \"f68a3ec7-1bab-402f-aec2-1e86ac6032a0\") " pod="openstack/mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.222428 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6shs\" (UniqueName: \"kubernetes.io/projected/8934acbf-8898-41c0-a562-45a5838db397-kube-api-access-t6shs\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.323996 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2ssp\" (UniqueName: \"kubernetes.io/projected/f68a3ec7-1bab-402f-aec2-1e86ac6032a0-kube-api-access-z2ssp\") pod \"mariadb-client\" (UID: \"f68a3ec7-1bab-402f-aec2-1e86ac6032a0\") " pod="openstack/mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.341538 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2ssp\" (UniqueName: \"kubernetes.io/projected/f68a3ec7-1bab-402f-aec2-1e86ac6032a0-kube-api-access-z2ssp\") pod \"mariadb-client\" (UID: \"f68a3ec7-1bab-402f-aec2-1e86ac6032a0\") " pod="openstack/mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.479466 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.671026 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42a88ad02747d08aa89e33603c7c1f9232b24e1b16f10ecc76e2213d0ac52ad4"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.671105 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 26 08:43:00 crc kubenswrapper[4940]: I1126 08:43:00.707145 4940 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="8934acbf-8898-41c0-a562-45a5838db397" podUID="f68a3ec7-1bab-402f-aec2-1e86ac6032a0"
Nov 26 08:43:01 crc kubenswrapper[4940]: I1126 08:43:01.046504 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:43:01 crc kubenswrapper[4940]: W1126 08:43:01.051632 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf68a3ec7_1bab_402f_aec2_1e86ac6032a0.slice/crio-85d37ca99a2bb6e5788535d3997f104e5441c5fca91aa211178af407c7509c2e WatchSource:0}: Error finding container 85d37ca99a2bb6e5788535d3997f104e5441c5fca91aa211178af407c7509c2e: Status 404 returned error can't find the container with id 85d37ca99a2bb6e5788535d3997f104e5441c5fca91aa211178af407c7509c2e
Nov 26 08:43:01 crc kubenswrapper[4940]: I1126 08:43:01.174451 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8934acbf-8898-41c0-a562-45a5838db397" path="/var/lib/kubelet/pods/8934acbf-8898-41c0-a562-45a5838db397/volumes"
Nov 26 08:43:01 crc kubenswrapper[4940]: I1126 08:43:01.679554 4940 generic.go:334] "Generic (PLEG): container finished" podID="f68a3ec7-1bab-402f-aec2-1e86ac6032a0" containerID="d136bb8b9913cd7f79945d1f887a9b4f6b64303e30e5a0fed348ef1165c3ab7c" exitCode=0
Nov 26 08:43:01 crc kubenswrapper[4940]: I1126 08:43:01.679603 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"f68a3ec7-1bab-402f-aec2-1e86ac6032a0","Type":"ContainerDied","Data":"d136bb8b9913cd7f79945d1f887a9b4f6b64303e30e5a0fed348ef1165c3ab7c"}
Nov 26 08:43:01 crc kubenswrapper[4940]: I1126 08:43:01.679652 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"f68a3ec7-1bab-402f-aec2-1e86ac6032a0","Type":"ContainerStarted","Data":"85d37ca99a2bb6e5788535d3997f104e5441c5fca91aa211178af407c7509c2e"}
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.005361 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.027810 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_f68a3ec7-1bab-402f-aec2-1e86ac6032a0/mariadb-client/0.log"
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.058937 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.070578 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2ssp\" (UniqueName: \"kubernetes.io/projected/f68a3ec7-1bab-402f-aec2-1e86ac6032a0-kube-api-access-z2ssp\") pod \"f68a3ec7-1bab-402f-aec2-1e86ac6032a0\" (UID: \"f68a3ec7-1bab-402f-aec2-1e86ac6032a0\") "
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.081207 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.087185 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f68a3ec7-1bab-402f-aec2-1e86ac6032a0-kube-api-access-z2ssp" (OuterVolumeSpecName: "kube-api-access-z2ssp") pod "f68a3ec7-1bab-402f-aec2-1e86ac6032a0" (UID: "f68a3ec7-1bab-402f-aec2-1e86ac6032a0"). InnerVolumeSpecName "kube-api-access-z2ssp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.173157 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2ssp\" (UniqueName: \"kubernetes.io/projected/f68a3ec7-1bab-402f-aec2-1e86ac6032a0-kube-api-access-z2ssp\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.178333 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f68a3ec7-1bab-402f-aec2-1e86ac6032a0" path="/var/lib/kubelet/pods/f68a3ec7-1bab-402f-aec2-1e86ac6032a0/volumes"
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.696586 4940 scope.go:117] "RemoveContainer" containerID="d136bb8b9913cd7f79945d1f887a9b4f6b64303e30e5a0fed348ef1165c3ab7c"
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.696609 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.816685 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.816750 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:43:03 crc kubenswrapper[4940]: I1126 08:43:03.872799 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:43:04 crc kubenswrapper[4940]: I1126 08:43:04.793717 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:43:04 crc kubenswrapper[4940]: I1126 08:43:04.845575 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4fd8n"]
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.537916 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-g228t"]
Nov 26 08:43:06 crc kubenswrapper[4940]: E1126 08:43:06.538611 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f68a3ec7-1bab-402f-aec2-1e86ac6032a0" containerName="mariadb-client"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.538627 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f68a3ec7-1bab-402f-aec2-1e86ac6032a0" containerName="mariadb-client"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.538830 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f68a3ec7-1bab-402f-aec2-1e86ac6032a0" containerName="mariadb-client"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.541196 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.547526 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g228t"]
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.623740 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvtrw\" (UniqueName: \"kubernetes.io/projected/e309caba-3a39-4249-ad2b-52d506e56bee-kube-api-access-kvtrw\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.623826 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-utilities\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.623861 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-catalog-content\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.721891 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4fd8n" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerName="registry-server" containerID="cri-o://e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257" gracePeriod=2
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.725308 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvtrw\" (UniqueName: \"kubernetes.io/projected/e309caba-3a39-4249-ad2b-52d506e56bee-kube-api-access-kvtrw\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.725416 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-utilities\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.725466 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-catalog-content\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.726159 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-catalog-content\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.726695 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-utilities\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.744883 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvtrw\" (UniqueName: \"kubernetes.io/projected/e309caba-3a39-4249-ad2b-52d506e56bee-kube-api-access-kvtrw\") pod \"redhat-marketplace-g228t\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") " pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:06 crc kubenswrapper[4940]: I1126 08:43:06.860544 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.136820 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.232530 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-catalog-content\") pod \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") "
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.232599 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57ckk\" (UniqueName: \"kubernetes.io/projected/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-kube-api-access-57ckk\") pod \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") "
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.232682 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-utilities\") pod \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\" (UID: \"01c542e9-9925-4065-ba4c-bbe6c3d85bc0\") "
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.236940 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-utilities" (OuterVolumeSpecName: "utilities") pod "01c542e9-9925-4065-ba4c-bbe6c3d85bc0" (UID: "01c542e9-9925-4065-ba4c-bbe6c3d85bc0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.238317 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g228t"]
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.255386 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-kube-api-access-57ckk" (OuterVolumeSpecName: "kube-api-access-57ckk") pod "01c542e9-9925-4065-ba4c-bbe6c3d85bc0" (UID: "01c542e9-9925-4065-ba4c-bbe6c3d85bc0"). InnerVolumeSpecName "kube-api-access-57ckk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.295779 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "01c542e9-9925-4065-ba4c-bbe6c3d85bc0" (UID: "01c542e9-9925-4065-ba4c-bbe6c3d85bc0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.333856 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.333883 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.333894 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57ckk\" (UniqueName: \"kubernetes.io/projected/01c542e9-9925-4065-ba4c-bbe6c3d85bc0-kube-api-access-57ckk\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.730103 4940 generic.go:334] "Generic (PLEG): container finished" podID="e309caba-3a39-4249-ad2b-52d506e56bee" containerID="ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de" exitCode=0
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.730234 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g228t" event={"ID":"e309caba-3a39-4249-ad2b-52d506e56bee","Type":"ContainerDied","Data":"ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de"}
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.730457 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g228t" event={"ID":"e309caba-3a39-4249-ad2b-52d506e56bee","Type":"ContainerStarted","Data":"c3c645cb41c495db5315ae9595056773ee4d8d27150e1509669a0661c8104bbc"}
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.734597 4940 generic.go:334] "Generic (PLEG): container finished" podID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerID="e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257" exitCode=0
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.734624 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fd8n" event={"ID":"01c542e9-9925-4065-ba4c-bbe6c3d85bc0","Type":"ContainerDied","Data":"e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257"}
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.734655 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fd8n" event={"ID":"01c542e9-9925-4065-ba4c-bbe6c3d85bc0","Type":"ContainerDied","Data":"5ba3119fc6a66da16ec84441036acae84e1a9a07959138b0817e0f1bf43e3c12"}
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.734678 4940 scope.go:117] "RemoveContainer" containerID="e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.734674 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4fd8n"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.756549 4940 scope.go:117] "RemoveContainer" containerID="46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.800953 4940 scope.go:117] "RemoveContainer" containerID="78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.803394 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4fd8n"]
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.809844 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4fd8n"]
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.827039 4940 scope.go:117] "RemoveContainer" containerID="e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257"
Nov 26 08:43:07 crc kubenswrapper[4940]: E1126 08:43:07.827426 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257\": container with ID starting with e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257 not found: ID does not exist" containerID="e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.827471 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257"} err="failed to get container status \"e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257\": rpc error: code = NotFound desc = could not find container \"e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257\": container with ID starting with e1583857002a6a8ca269b6de7171b86a7f3bdc480a897fc99ae84c5920e9c257 not found: ID does not exist"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.827499 4940 scope.go:117] "RemoveContainer" containerID="46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce"
Nov 26 08:43:07 crc kubenswrapper[4940]: E1126 08:43:07.827757 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce\": container with ID starting with 46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce not found: ID does not exist" containerID="46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.827789 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce"} err="failed to get container status \"46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce\": rpc error: code = NotFound desc = could not find container \"46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce\": container with ID starting with 46caf36a54b934a83373711ab88af0a70b5e7f31896beedd661e2967740fc6ce not found: ID does not exist"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.827826 4940 scope.go:117] "RemoveContainer" containerID="78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf"
Nov 26 08:43:07 crc kubenswrapper[4940]: E1126 08:43:07.828056 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf\": container with ID starting with 78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf not found: ID does not exist" containerID="78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf"
Nov 26 08:43:07 crc kubenswrapper[4940]: I1126 08:43:07.828085 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf"} err="failed to get container status \"78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf\": rpc error: code = NotFound desc = could not find container \"78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf\": container with ID starting with 78267e4e12344dd811f56621f800905e0cc4eb78e70c341852de3cfe0810cfdf not found: ID does not exist"
Nov 26 08:43:08 crc kubenswrapper[4940]: I1126 08:43:08.745006 4940 generic.go:334] "Generic (PLEG): container finished" podID="e309caba-3a39-4249-ad2b-52d506e56bee" containerID="ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb" exitCode=0
Nov 26 08:43:08 crc kubenswrapper[4940]: I1126 08:43:08.745328 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g228t" event={"ID":"e309caba-3a39-4249-ad2b-52d506e56bee","Type":"ContainerDied","Data":"ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb"}
Nov 26 08:43:09 crc kubenswrapper[4940]: I1126 08:43:09.176334 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" path="/var/lib/kubelet/pods/01c542e9-9925-4065-ba4c-bbe6c3d85bc0/volumes"
Nov 26 08:43:09 crc kubenswrapper[4940]: I1126 08:43:09.754898 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g228t" event={"ID":"e309caba-3a39-4249-ad2b-52d506e56bee","Type":"ContainerStarted","Data":"d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17"}
Nov 26 08:43:09 crc kubenswrapper[4940]: I1126 08:43:09.779797 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-g228t" podStartSLOduration=2.266344667 podStartE2EDuration="3.779780234s" podCreationTimestamp="2025-11-26 08:43:06 +0000 UTC" firstStartedPulling="2025-11-26 08:43:07.73177525 +0000 UTC m=+6489.251916869" lastFinishedPulling="2025-11-26 08:43:09.245210817 +0000 UTC m=+6490.765352436" observedRunningTime="2025-11-26 08:43:09.773624017 +0000 UTC m=+6491.293765666" watchObservedRunningTime="2025-11-26 08:43:09.779780234 +0000 UTC m=+6491.299921853"
Nov 26 08:43:16 crc kubenswrapper[4940]: I1126 08:43:16.861684 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:16 crc kubenswrapper[4940]: I1126 08:43:16.862198 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:16 crc kubenswrapper[4940]: I1126 08:43:16.903237 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:17 crc kubenswrapper[4940]: I1126 08:43:17.894019 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:17 crc kubenswrapper[4940]: I1126 08:43:17.954802 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g228t"]
Nov 26 08:43:19 crc kubenswrapper[4940]: I1126 08:43:19.843859 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-g228t" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" containerName="registry-server" containerID="cri-o://d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17" gracePeriod=2
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.361675 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.440229 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-catalog-content\") pod \"e309caba-3a39-4249-ad2b-52d506e56bee\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") "
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.440359 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-utilities\") pod \"e309caba-3a39-4249-ad2b-52d506e56bee\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") "
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.440470 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvtrw\" (UniqueName: \"kubernetes.io/projected/e309caba-3a39-4249-ad2b-52d506e56bee-kube-api-access-kvtrw\") pod \"e309caba-3a39-4249-ad2b-52d506e56bee\" (UID: \"e309caba-3a39-4249-ad2b-52d506e56bee\") "
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.441687 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-utilities" (OuterVolumeSpecName: "utilities") pod "e309caba-3a39-4249-ad2b-52d506e56bee" (UID: "e309caba-3a39-4249-ad2b-52d506e56bee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.446231 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e309caba-3a39-4249-ad2b-52d506e56bee-kube-api-access-kvtrw" (OuterVolumeSpecName: "kube-api-access-kvtrw") pod "e309caba-3a39-4249-ad2b-52d506e56bee" (UID: "e309caba-3a39-4249-ad2b-52d506e56bee"). InnerVolumeSpecName "kube-api-access-kvtrw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.468679 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e309caba-3a39-4249-ad2b-52d506e56bee" (UID: "e309caba-3a39-4249-ad2b-52d506e56bee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.541833 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.541884 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvtrw\" (UniqueName: \"kubernetes.io/projected/e309caba-3a39-4249-ad2b-52d506e56bee-kube-api-access-kvtrw\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.541905 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e309caba-3a39-4249-ad2b-52d506e56bee-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.857486 4940 generic.go:334] "Generic (PLEG): container finished" podID="e309caba-3a39-4249-ad2b-52d506e56bee" containerID="d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17" exitCode=0
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.857546 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g228t" event={"ID":"e309caba-3a39-4249-ad2b-52d506e56bee","Type":"ContainerDied","Data":"d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17"}
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.857596 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g228t" event={"ID":"e309caba-3a39-4249-ad2b-52d506e56bee","Type":"ContainerDied","Data":"c3c645cb41c495db5315ae9595056773ee4d8d27150e1509669a0661c8104bbc"}
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.857619 4940 scope.go:117] "RemoveContainer" containerID="d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.857550 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g228t"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.879973 4940 scope.go:117] "RemoveContainer" containerID="ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.900597 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g228t"]
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.907802 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-g228t"]
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.910093 4940 scope.go:117] "RemoveContainer" containerID="ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.935666 4940 scope.go:117] "RemoveContainer" containerID="d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17"
Nov 26 08:43:20 crc kubenswrapper[4940]: E1126 08:43:20.936153 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17\": container with ID starting with d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17 not found: ID does not exist" containerID="d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.936196 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17"} err="failed to get container status \"d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17\": rpc error: code = NotFound desc = could not find container \"d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17\": container with ID starting with d4ab98bb0b4e8b746476d4af926cb074b47902459054ff9b70d3efe5fe778a17 not found: ID does not exist"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.936231 4940 scope.go:117] "RemoveContainer" containerID="ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb"
Nov 26 08:43:20 crc kubenswrapper[4940]: E1126 08:43:20.936618 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb\": container with ID starting with ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb not found: ID does not exist" containerID="ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.936650 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb"} err="failed to get container status \"ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb\": rpc error: code = NotFound desc = could not find container \"ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb\": container with ID starting with ca38de327b8eb0833b9f522f0f13387e10b0b28fa67d5a3fe385f58768a3ceeb not found: ID does not exist"
Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.936678 4940 scope.go:117] "RemoveContainer" containerID="ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de"
Nov 26 08:43:20 crc kubenswrapper[4940]: E1126 08:43:20.936920 4940 log.go:32] "ContainerStatus from runtime service
failed" err="rpc error: code = NotFound desc = could not find container \"ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de\": container with ID starting with ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de not found: ID does not exist" containerID="ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de" Nov 26 08:43:20 crc kubenswrapper[4940]: I1126 08:43:20.936947 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de"} err="failed to get container status \"ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de\": rpc error: code = NotFound desc = could not find container \"ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de\": container with ID starting with ab9ae2d0c20d6096cbef4e90152bfa6913b9f37496ad84436645c276dbaaa3de not found: ID does not exist" Nov 26 08:43:21 crc kubenswrapper[4940]: I1126 08:43:21.174751 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" path="/var/lib/kubelet/pods/e309caba-3a39-4249-ad2b-52d506e56bee/volumes" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.214290 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 08:43:30 crc kubenswrapper[4940]: E1126 08:43:30.215440 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerName="registry-server" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.215453 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerName="registry-server" Nov 26 08:43:30 crc kubenswrapper[4940]: E1126 08:43:30.215464 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" containerName="registry-server" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.215469 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" containerName="registry-server" Nov 26 08:43:30 crc kubenswrapper[4940]: E1126 08:43:30.215482 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" containerName="extract-content" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.215488 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" containerName="extract-content" Nov 26 08:43:30 crc kubenswrapper[4940]: E1126 08:43:30.215516 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" containerName="extract-utilities" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.215522 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" containerName="extract-utilities" Nov 26 08:43:30 crc kubenswrapper[4940]: E1126 08:43:30.215532 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerName="extract-utilities" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.215736 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerName="extract-utilities" Nov 26 08:43:30 crc kubenswrapper[4940]: E1126 08:43:30.215748 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerName="extract-content" Nov 26 08:43:30 
crc kubenswrapper[4940]: I1126 08:43:30.215754 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerName="extract-content" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.215926 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="01c542e9-9925-4065-ba4c-bbe6c3d85bc0" containerName="registry-server" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.215948 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e309caba-3a39-4249-ad2b-52d506e56bee" containerName="registry-server" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.217010 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.221669 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-wtrg7" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.221948 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.222134 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.243384 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.263599 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.264803 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.272431 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.273828 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.287387 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ca2e1407-cdff-467f-976b-25cd954ca90c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.287489 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ca2e1407-cdff-467f-976b-25cd954ca90c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.287529 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2e1407-cdff-467f-976b-25cd954ca90c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.287565 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca2e1407-cdff-467f-976b-25cd954ca90c-config\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.287593 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb4rq\" (UniqueName: \"kubernetes.io/projected/ca2e1407-cdff-467f-976b-25cd954ca90c-kube-api-access-cb4rq\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.287634 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f9c0c824-2d26-4aaa-90ef-7bf1e6884617\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9c0c824-2d26-4aaa-90ef-7bf1e6884617\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.300080 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.323964 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.390054 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d2b8bb2c-4e49-437e-a546-c844992436f5-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.390525 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.390568 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d2b8bb2c-4e49-437e-a546-c844992436f5-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.390586 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ft99g\" (UniqueName: \"kubernetes.io/projected/d2b8bb2c-4e49-437e-a546-c844992436f5-kube-api-access-ft99g\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.390759 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns2mt\" (UniqueName: \"kubernetes.io/projected/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-kube-api-access-ns2mt\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.390831 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-62e56396-59e7-4cb3-93a0-3321e390aa91\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-62e56396-59e7-4cb3-93a0-3321e390aa91\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.390863 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ca2e1407-cdff-467f-976b-25cd954ca90c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.390980 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ca2e1407-cdff-467f-976b-25cd954ca90c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391029 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2e1407-cdff-467f-976b-25cd954ca90c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391086 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391139 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca2e1407-cdff-467f-976b-25cd954ca90c-config\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391260 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb4rq\" (UniqueName: 
\"kubernetes.io/projected/ca2e1407-cdff-467f-976b-25cd954ca90c-kube-api-access-cb4rq\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391295 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2b8bb2c-4e49-437e-a546-c844992436f5-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391321 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-config\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391359 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f9c0c824-2d26-4aaa-90ef-7bf1e6884617\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9c0c824-2d26-4aaa-90ef-7bf1e6884617\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391416 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2b8bb2c-4e49-437e-a546-c844992436f5-config\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391442 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f8c164d2-7558-4adb-8490-b040d13d23f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c164d2-7558-4adb-8490-b040d13d23f9\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391481 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.391612 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ca2e1407-cdff-467f-976b-25cd954ca90c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.392980 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca2e1407-cdff-467f-976b-25cd954ca90c-config\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.393298 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ca2e1407-cdff-467f-976b-25cd954ca90c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " 
pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.398112 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.398179 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f9c0c824-2d26-4aaa-90ef-7bf1e6884617\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9c0c824-2d26-4aaa-90ef-7bf1e6884617\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ad6e8bd63f34e3a7c1215742af52c986d8e384d0c5b9d8f6ce8daa0fef5b3d1b/globalmount\"" pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.409848 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2e1407-cdff-467f-976b-25cd954ca90c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.418248 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb4rq\" (UniqueName: \"kubernetes.io/projected/ca2e1407-cdff-467f-976b-25cd954ca90c-kube-api-access-cb4rq\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.432664 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.434609 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.438073 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.438316 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.438469 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-knh7v" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.442747 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f9c0c824-2d26-4aaa-90ef-7bf1e6884617\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f9c0c824-2d26-4aaa-90ef-7bf1e6884617\") pod \"ovsdbserver-nb-0\" (UID: \"ca2e1407-cdff-467f-976b-25cd954ca90c\") " pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.447859 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.451379 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.463000 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.465171 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.476622 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.486431 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492656 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6cfef967-d90d-4976-96ed-a6e729af1322\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6cfef967-d90d-4976-96ed-a6e729af1322\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492710 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2b8bb2c-4e49-437e-a546-c844992436f5-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492736 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-config\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492760 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492777 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-config\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492800 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2b8bb2c-4e49-437e-a546-c844992436f5-config\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492818 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f8c164d2-7558-4adb-8490-b040d13d23f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c164d2-7558-4adb-8490-b040d13d23f9\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492834 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492858 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" 
(UniqueName: \"kubernetes.io/empty-dir/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492875 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d2b8bb2c-4e49-437e-a546-c844992436f5-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492894 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5e56e949-cc22-4716-ac23-e303a7a6ca5e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5e56e949-cc22-4716-ac23-e303a7a6ca5e\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492916 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492935 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d2b8bb2c-4e49-437e-a546-c844992436f5-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492952 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37cd323c-b236-4863-ad21-d6aaf9c48065-config\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492971 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ft99g\" (UniqueName: \"kubernetes.io/projected/d2b8bb2c-4e49-437e-a546-c844992436f5-kube-api-access-ft99g\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.492990 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nx7zs\" (UniqueName: \"kubernetes.io/projected/37cd323c-b236-4863-ad21-d6aaf9c48065-kube-api-access-nx7zs\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493013 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37cd323c-b236-4863-ad21-d6aaf9c48065-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493032 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37cd323c-b236-4863-ad21-d6aaf9c48065-scripts\") pod \"ovsdbserver-sb-1\" (UID: 
\"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493067 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493088 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns2mt\" (UniqueName: \"kubernetes.io/projected/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-kube-api-access-ns2mt\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493113 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8qj2\" (UniqueName: \"kubernetes.io/projected/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-kube-api-access-b8qj2\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493140 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-62e56396-59e7-4cb3-93a0-3321e390aa91\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-62e56396-59e7-4cb3-93a0-3321e390aa91\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493158 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1210d1df-4b74-4f1b-83f3-e391e318adb4-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493185 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1210d1df-4b74-4f1b-83f3-e391e318adb4-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493201 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/37cd323c-b236-4863-ad21-d6aaf9c48065-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493224 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3a332507-45d6-4775-a8d9-f1ce5abdab00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3a332507-45d6-4775-a8d9-f1ce5abdab00\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493268 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d28m\" (UniqueName: \"kubernetes.io/projected/1210d1df-4b74-4f1b-83f3-e391e318adb4-kube-api-access-8d28m\") pod \"ovsdbserver-sb-2\" (UID: 
\"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493314 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493338 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1210d1df-4b74-4f1b-83f3-e391e318adb4-config\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493377 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1210d1df-4b74-4f1b-83f3-e391e318adb4-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.494579 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-config\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.493905 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.494883 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.497565 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2b8bb2c-4e49-437e-a546-c844992436f5-config\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.497583 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d2b8bb2c-4e49-437e-a546-c844992436f5-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.497896 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d2b8bb2c-4e49-437e-a546-c844992436f5-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.497925 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: 
\"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.498515 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2b8bb2c-4e49-437e-a546-c844992436f5-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.499095 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.500613 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.500640 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-62e56396-59e7-4cb3-93a0-3321e390aa91\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-62e56396-59e7-4cb3-93a0-3321e390aa91\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7ec5938d057b1611bad45884260c2f1992a2de55b9054d2c07c1a1e47f0ad124/globalmount\"" pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.500724 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.500762 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f8c164d2-7558-4adb-8490-b040d13d23f9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c164d2-7558-4adb-8490-b040d13d23f9\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/fd928054984a443b571cdfed173f69d5a8669f076cd7f2c535cdc84a1df3e814/globalmount\"" pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.510651 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns2mt\" (UniqueName: \"kubernetes.io/projected/d9b4a37c-681f-4b37-81d0-9444ce90ed8a-kube-api-access-ns2mt\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.512632 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ft99g\" (UniqueName: \"kubernetes.io/projected/d2b8bb2c-4e49-437e-a546-c844992436f5-kube-api-access-ft99g\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.528516 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-62e56396-59e7-4cb3-93a0-3321e390aa91\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-62e56396-59e7-4cb3-93a0-3321e390aa91\") pod \"ovsdbserver-nb-2\" (UID: \"d2b8bb2c-4e49-437e-a546-c844992436f5\") " pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.529453 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f8c164d2-7558-4adb-8490-b040d13d23f9\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8c164d2-7558-4adb-8490-b040d13d23f9\") pod \"ovsdbserver-nb-1\" (UID: \"d9b4a37c-681f-4b37-81d0-9444ce90ed8a\") " pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.539169 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.594250 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1210d1df-4b74-4f1b-83f3-e391e318adb4-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.594310 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/37cd323c-b236-4863-ad21-d6aaf9c48065-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.594322 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.594349 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3a332507-45d6-4775-a8d9-f1ce5abdab00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3a332507-45d6-4775-a8d9-f1ce5abdab00\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595152 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d28m\" (UniqueName: \"kubernetes.io/projected/1210d1df-4b74-4f1b-83f3-e391e318adb4-kube-api-access-8d28m\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595234 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1210d1df-4b74-4f1b-83f3-e391e318adb4-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595267 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1210d1df-4b74-4f1b-83f3-e391e318adb4-config\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595353 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6cfef967-d90d-4976-96ed-a6e729af1322\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6cfef967-d90d-4976-96ed-a6e729af1322\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595388 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/37cd323c-b236-4863-ad21-d6aaf9c48065-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc 
kubenswrapper[4940]: I1126 08:43:30.595437 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595476 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-config\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595523 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595581 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5e56e949-cc22-4716-ac23-e303a7a6ca5e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5e56e949-cc22-4716-ac23-e303a7a6ca5e\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595623 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37cd323c-b236-4863-ad21-d6aaf9c48065-config\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595753 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nx7zs\" (UniqueName: \"kubernetes.io/projected/37cd323c-b236-4863-ad21-d6aaf9c48065-kube-api-access-nx7zs\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595794 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37cd323c-b236-4863-ad21-d6aaf9c48065-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595820 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37cd323c-b236-4863-ad21-d6aaf9c48065-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595851 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595902 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8qj2\" (UniqueName: \"kubernetes.io/projected/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-kube-api-access-b8qj2\") pod 
\"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.595943 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1210d1df-4b74-4f1b-83f3-e391e318adb4-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.596410 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1210d1df-4b74-4f1b-83f3-e391e318adb4-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.596595 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1210d1df-4b74-4f1b-83f3-e391e318adb4-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.596660 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1210d1df-4b74-4f1b-83f3-e391e318adb4-config\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.597298 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.597748 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-config\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.598081 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37cd323c-b236-4863-ad21-d6aaf9c48065-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.598435 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37cd323c-b236-4863-ad21-d6aaf9c48065-config\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.599800 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.601440 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " 
pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.602170 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1210d1df-4b74-4f1b-83f3-e391e318adb4-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.603653 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.603689 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6cfef967-d90d-4976-96ed-a6e729af1322\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6cfef967-d90d-4976-96ed-a6e729af1322\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b399628ebf165617b20a12325a5bd8b201a467436ad4d6f7b2d2a104ef2cf0f6/globalmount\"" pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.603856 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.603912 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5e56e949-cc22-4716-ac23-e303a7a6ca5e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5e56e949-cc22-4716-ac23-e303a7a6ca5e\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/feea649816a0802a92960dac6b332b5e3429785ba5276a9fc2a09c2c46864a47/globalmount\"" pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.604960 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.606156 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37cd323c-b236-4863-ad21-d6aaf9c48065-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.608232 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.608285 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3a332507-45d6-4775-a8d9-f1ce5abdab00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3a332507-45d6-4775-a8d9-f1ce5abdab00\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/3e53c2546c38d719ef64a793fc1ca1d02a5a03b1213cf0aab07c39ac83c66f47/globalmount\"" pod="openstack/ovsdbserver-sb-0"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.613438 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d28m\" (UniqueName: \"kubernetes.io/projected/1210d1df-4b74-4f1b-83f3-e391e318adb4-kube-api-access-8d28m\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.617610 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nx7zs\" (UniqueName: \"kubernetes.io/projected/37cd323c-b236-4863-ad21-d6aaf9c48065-kube-api-access-nx7zs\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.618007 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8qj2\" (UniqueName: \"kubernetes.io/projected/caeadb14-74d5-4e7a-aeb8-4026fe90f57b-kube-api-access-b8qj2\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.668397 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5e56e949-cc22-4716-ac23-e303a7a6ca5e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5e56e949-cc22-4716-ac23-e303a7a6ca5e\") pod \"ovsdbserver-sb-1\" (UID: \"37cd323c-b236-4863-ad21-d6aaf9c48065\") " pod="openstack/ovsdbserver-sb-1"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.668561 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6cfef967-d90d-4976-96ed-a6e729af1322\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6cfef967-d90d-4976-96ed-a6e729af1322\") pod \"ovsdbserver-sb-2\" (UID: \"1210d1df-4b74-4f1b-83f3-e391e318adb4\") " pod="openstack/ovsdbserver-sb-2"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.672789 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.680249 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3a332507-45d6-4775-a8d9-f1ce5abdab00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3a332507-45d6-4775-a8d9-f1ce5abdab00\") pod \"ovsdbserver-sb-0\" (UID: \"caeadb14-74d5-4e7a-aeb8-4026fe90f57b\") " pod="openstack/ovsdbserver-sb-0"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.945677 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 26 08:43:30 crc kubenswrapper[4940]: I1126 08:43:30.956935 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1"
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.066268 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.185331 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"]
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.289820 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"]
Nov 26 08:43:31 crc kubenswrapper[4940]: W1126 08:43:31.295243 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1210d1df_4b74_4f1b_83f3_e391e318adb4.slice/crio-ff960f1ea572f20db658b64f3a904964c6ce8149d66b1194849bd437dabc74ef WatchSource:0}: Error finding container ff960f1ea572f20db658b64f3a904964c6ce8149d66b1194849bd437dabc74ef: Status 404 returned error can't find the container with id ff960f1ea572f20db658b64f3a904964c6ce8149d66b1194849bd437dabc74ef
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.515899 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.893211 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"]
Nov 26 08:43:31 crc kubenswrapper[4940]: W1126 08:43:31.912555 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2b8bb2c_4e49_437e_a546_c844992436f5.slice/crio-7f247b317011e7ad74a44e55a6a702b03e43846e8d930063f5ec4322012dcb35 WatchSource:0}: Error finding container 7f247b317011e7ad74a44e55a6a702b03e43846e8d930063f5ec4322012dcb35: Status 404 returned error can't find the container with id 7f247b317011e7ad74a44e55a6a702b03e43846e8d930063f5ec4322012dcb35
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.944990 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"1210d1df-4b74-4f1b-83f3-e391e318adb4","Type":"ContainerStarted","Data":"ff960f1ea572f20db658b64f3a904964c6ce8149d66b1194849bd437dabc74ef"}
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.948067 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"d2b8bb2c-4e49-437e-a546-c844992436f5","Type":"ContainerStarted","Data":"7f247b317011e7ad74a44e55a6a702b03e43846e8d930063f5ec4322012dcb35"}
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.951548 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"d9b4a37c-681f-4b37-81d0-9444ce90ed8a","Type":"ContainerStarted","Data":"9258ff021c5fce2a03653eb284608febd5be3494e94218efcbf012dd6f7a2dc2"}
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.953807 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"caeadb14-74d5-4e7a-aeb8-4026fe90f57b","Type":"ContainerStarted","Data":"2848a7dd5660e895d0e35a3b1a39dd042a07d3fe512e33453c5fd82b85622bc9"}
Nov 26 08:43:31 crc kubenswrapper[4940]: I1126 08:43:31.955627 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"ca2e1407-cdff-467f-976b-25cd954ca90c","Type":"ContainerStarted","Data":"216b947000744d2086909fec66b9e2dbce9c463eea7e62f8c25defe1b67e44a1"}
Nov 26 08:43:32 crc kubenswrapper[4940]: I1126 08:43:32.131022 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"]
Nov 26 08:43:32 crc kubenswrapper[4940]: W1126 08:43:32.148190 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37cd323c_b236_4863_ad21_d6aaf9c48065.slice/crio-024154f9fa2061139f59c2b123990d552980dbb62429052dbe4b73c5ebab947a WatchSource:0}: Error finding container 024154f9fa2061139f59c2b123990d552980dbb62429052dbe4b73c5ebab947a: Status 404 returned error can't find the container with id 024154f9fa2061139f59c2b123990d552980dbb62429052dbe4b73c5ebab947a
Nov 26 08:43:32 crc kubenswrapper[4940]: I1126 08:43:32.963790 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"37cd323c-b236-4863-ad21-d6aaf9c48065","Type":"ContainerStarted","Data":"024154f9fa2061139f59c2b123990d552980dbb62429052dbe4b73c5ebab947a"}
Nov 26 08:43:35 crc kubenswrapper[4940]: I1126 08:43:35.994578 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"1210d1df-4b74-4f1b-83f3-e391e318adb4","Type":"ContainerStarted","Data":"32c2fc3f1eaa3a9f17eaeaae24600d3d4a0267a940efda75e50d94dd01509cde"}
Nov 26 08:43:35 crc kubenswrapper[4940]: I1126 08:43:35.995177 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"1210d1df-4b74-4f1b-83f3-e391e318adb4","Type":"ContainerStarted","Data":"93243b51c46890f5b029e90451e04345136d93f72172ea0ba5de7821e559632b"}
Nov 26 08:43:35 crc kubenswrapper[4940]: I1126 08:43:35.996920 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"d2b8bb2c-4e49-437e-a546-c844992436f5","Type":"ContainerStarted","Data":"90b53b6f4e6cdda6560b756461aff233eb0888f96482e459a4fb119d480aece5"}
Nov 26 08:43:35 crc kubenswrapper[4940]: I1126 08:43:35.996954 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"d2b8bb2c-4e49-437e-a546-c844992436f5","Type":"ContainerStarted","Data":"5b42811441175dfbea610043e0bd3a28df115ade24e490bb7a56496e61702e0a"}
Nov 26 08:43:35 crc kubenswrapper[4940]: I1126 08:43:35.999573 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"d9b4a37c-681f-4b37-81d0-9444ce90ed8a","Type":"ContainerStarted","Data":"b98ccbb258de1eba6edecf1a5145cb5c24e6cc7722f5f24bd3ae0cd12a23567b"}
Nov 26 08:43:35 crc kubenswrapper[4940]: I1126 08:43:35.999606 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"d9b4a37c-681f-4b37-81d0-9444ce90ed8a","Type":"ContainerStarted","Data":"d4781c560a27c325fc6030355df02b76b978e3c35b2e2f45c0ab75424bb66971"}
Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.001723 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"caeadb14-74d5-4e7a-aeb8-4026fe90f57b","Type":"ContainerStarted","Data":"bc2258bcf50057e3064230f521aada5338b427d7390e49c75a935cc7926c74ab"}
Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.001758 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"caeadb14-74d5-4e7a-aeb8-4026fe90f57b","Type":"ContainerStarted","Data":"34a4b1a34f4717df78d806dea8a9d5383b6c5105168a5dc39010ec1d14fb1388"}
Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.004763 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1"
event={"ID":"37cd323c-b236-4863-ad21-d6aaf9c48065","Type":"ContainerStarted","Data":"e331ee45e2e13eea9e560a12b34e35681df924edb8eb6465493bba6156f5b02b"} Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.004799 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"37cd323c-b236-4863-ad21-d6aaf9c48065","Type":"ContainerStarted","Data":"133761579e4d41502219abc83175d9d502196ec7d17a8754b6627562c806b64f"} Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.006652 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"ca2e1407-cdff-467f-976b-25cd954ca90c","Type":"ContainerStarted","Data":"9ac76a039263e49309277161821cfb45829c2b26c759ff45b2077b9746cb174a"} Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.006705 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"ca2e1407-cdff-467f-976b-25cd954ca90c","Type":"ContainerStarted","Data":"d92811542692ca06c42e7d2c213327bd1a95bcf2e26800a863c3f590642503d5"} Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.020949 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=3.114648954 podStartE2EDuration="7.020930056s" podCreationTimestamp="2025-11-26 08:43:29 +0000 UTC" firstStartedPulling="2025-11-26 08:43:31.29731193 +0000 UTC m=+6512.817453549" lastFinishedPulling="2025-11-26 08:43:35.203593032 +0000 UTC m=+6516.723734651" observedRunningTime="2025-11-26 08:43:36.014716787 +0000 UTC m=+6517.534858426" watchObservedRunningTime="2025-11-26 08:43:36.020930056 +0000 UTC m=+6517.541071675" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.053305 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.423492034 podStartE2EDuration="7.053279768s" podCreationTimestamp="2025-11-26 08:43:29 +0000 UTC" firstStartedPulling="2025-11-26 08:43:31.53440149 +0000 UTC m=+6513.054543109" lastFinishedPulling="2025-11-26 08:43:35.164189224 +0000 UTC m=+6516.684330843" observedRunningTime="2025-11-26 08:43:36.050286363 +0000 UTC m=+6517.570427992" watchObservedRunningTime="2025-11-26 08:43:36.053279768 +0000 UTC m=+6517.573421407" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.053952 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=3.67897903 podStartE2EDuration="7.05394501s" podCreationTimestamp="2025-11-26 08:43:29 +0000 UTC" firstStartedPulling="2025-11-26 08:43:32.150744046 +0000 UTC m=+6513.670885665" lastFinishedPulling="2025-11-26 08:43:35.525710026 +0000 UTC m=+6517.045851645" observedRunningTime="2025-11-26 08:43:36.03421967 +0000 UTC m=+6517.554361289" watchObservedRunningTime="2025-11-26 08:43:36.05394501 +0000 UTC m=+6517.574086639" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.076458 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=2.976931667 podStartE2EDuration="7.076439498s" podCreationTimestamp="2025-11-26 08:43:29 +0000 UTC" firstStartedPulling="2025-11-26 08:43:31.070209839 +0000 UTC m=+6512.590351458" lastFinishedPulling="2025-11-26 08:43:35.16971767 +0000 UTC m=+6516.689859289" observedRunningTime="2025-11-26 08:43:36.074532237 +0000 UTC m=+6517.594673866" watchObservedRunningTime="2025-11-26 08:43:36.076439498 +0000 UTC m=+6517.596581117" Nov 26 08:43:36 crc kubenswrapper[4940]: 
I1126 08:43:36.095034 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.127836126 podStartE2EDuration="7.095015112s" podCreationTimestamp="2025-11-26 08:43:29 +0000 UTC" firstStartedPulling="2025-11-26 08:43:31.196984467 +0000 UTC m=+6512.717126086" lastFinishedPulling="2025-11-26 08:43:35.164163453 +0000 UTC m=+6516.684305072" observedRunningTime="2025-11-26 08:43:36.092452779 +0000 UTC m=+6517.612594408" watchObservedRunningTime="2025-11-26 08:43:36.095015112 +0000 UTC m=+6517.615156731" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.115549 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.617399954 podStartE2EDuration="7.115526356s" podCreationTimestamp="2025-11-26 08:43:29 +0000 UTC" firstStartedPulling="2025-11-26 08:43:31.918480461 +0000 UTC m=+6513.438622080" lastFinishedPulling="2025-11-26 08:43:35.416606863 +0000 UTC m=+6516.936748482" observedRunningTime="2025-11-26 08:43:36.106719395 +0000 UTC m=+6517.626861014" watchObservedRunningTime="2025-11-26 08:43:36.115526356 +0000 UTC m=+6517.635667975" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.539639 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.595214 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.606006 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.672942 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.946636 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:36 crc kubenswrapper[4940]: I1126 08:43:36.958054 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.575069 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.576307 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.673273 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.674192 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.697542 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.698001 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.717987 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.718450 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.986743 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:39 crc kubenswrapper[4940]: I1126 08:43:39.987253 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.017250 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.018087 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.073217 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.101486 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.104558 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.112806 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.116677 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.126879 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.292127 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66f4b75c59-qv6wd"] Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.293999 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.306925 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.323524 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66f4b75c59-qv6wd"] Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.371116 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlpg4\" (UniqueName: \"kubernetes.io/projected/d625daa6-76f9-4127-ac90-8db26d82244a-kube-api-access-hlpg4\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.371408 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-dns-svc\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.371472 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-config\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.371520 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-ovsdbserver-sb\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.478179 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-ovsdbserver-sb\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.478266 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlpg4\" (UniqueName: \"kubernetes.io/projected/d625daa6-76f9-4127-ac90-8db26d82244a-kube-api-access-hlpg4\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.478347 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-dns-svc\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.478375 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-config\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" 
Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.479305 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-config\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.479374 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-ovsdbserver-sb\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.479500 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-dns-svc\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.488713 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66f4b75c59-qv6wd"] Nov 26 08:43:40 crc kubenswrapper[4940]: E1126 08:43:40.489412 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-hlpg4], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" podUID="d625daa6-76f9-4127-ac90-8db26d82244a" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.523639 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlpg4\" (UniqueName: \"kubernetes.io/projected/d625daa6-76f9-4127-ac90-8db26d82244a-kube-api-access-hlpg4\") pod \"dnsmasq-dns-66f4b75c59-qv6wd\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.555369 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8447fd79d5-fss9b"] Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.557027 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.560584 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.567758 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8447fd79d5-fss9b"] Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.680876 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-nb\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.680969 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-sb\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.681031 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-dns-svc\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.681118 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-config\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.681155 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbn6r\" (UniqueName: \"kubernetes.io/projected/6953b269-d306-4f7a-900e-1334d18333cf-kube-api-access-qbn6r\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.783170 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-dns-svc\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.783245 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-config\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.783276 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbn6r\" (UniqueName: \"kubernetes.io/projected/6953b269-d306-4f7a-900e-1334d18333cf-kube-api-access-qbn6r\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " 
pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.783312 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-nb\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.783356 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-sb\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.784284 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-dns-svc\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.784384 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-sb\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.784412 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-nb\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.784755 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-config\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.808813 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbn6r\" (UniqueName: \"kubernetes.io/projected/6953b269-d306-4f7a-900e-1334d18333cf-kube-api-access-qbn6r\") pod \"dnsmasq-dns-8447fd79d5-fss9b\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:40 crc kubenswrapper[4940]: I1126 08:43:40.887994 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.061915 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.082828 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.196878 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-ovsdbserver-sb\") pod \"d625daa6-76f9-4127-ac90-8db26d82244a\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.196967 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-dns-svc\") pod \"d625daa6-76f9-4127-ac90-8db26d82244a\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.197012 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlpg4\" (UniqueName: \"kubernetes.io/projected/d625daa6-76f9-4127-ac90-8db26d82244a-kube-api-access-hlpg4\") pod \"d625daa6-76f9-4127-ac90-8db26d82244a\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.197104 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-config\") pod \"d625daa6-76f9-4127-ac90-8db26d82244a\" (UID: \"d625daa6-76f9-4127-ac90-8db26d82244a\") " Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.199399 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d625daa6-76f9-4127-ac90-8db26d82244a" (UID: "d625daa6-76f9-4127-ac90-8db26d82244a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.199419 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-config" (OuterVolumeSpecName: "config") pod "d625daa6-76f9-4127-ac90-8db26d82244a" (UID: "d625daa6-76f9-4127-ac90-8db26d82244a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.199407 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d625daa6-76f9-4127-ac90-8db26d82244a" (UID: "d625daa6-76f9-4127-ac90-8db26d82244a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.201937 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d625daa6-76f9-4127-ac90-8db26d82244a-kube-api-access-hlpg4" (OuterVolumeSpecName: "kube-api-access-hlpg4") pod "d625daa6-76f9-4127-ac90-8db26d82244a" (UID: "d625daa6-76f9-4127-ac90-8db26d82244a"). InnerVolumeSpecName "kube-api-access-hlpg4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.299722 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.299771 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlpg4\" (UniqueName: \"kubernetes.io/projected/d625daa6-76f9-4127-ac90-8db26d82244a-kube-api-access-hlpg4\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.299795 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.299815 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d625daa6-76f9-4127-ac90-8db26d82244a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:41 crc kubenswrapper[4940]: I1126 08:43:41.361189 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8447fd79d5-fss9b"] Nov 26 08:43:42 crc kubenswrapper[4940]: I1126 08:43:42.078573 4940 generic.go:334] "Generic (PLEG): container finished" podID="6953b269-d306-4f7a-900e-1334d18333cf" containerID="8c08cc9d125173789140735082cc16ff64bd07578af532a1c351f97e32e4f8ab" exitCode=0 Nov 26 08:43:42 crc kubenswrapper[4940]: I1126 08:43:42.078642 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66f4b75c59-qv6wd" Nov 26 08:43:42 crc kubenswrapper[4940]: I1126 08:43:42.078656 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" event={"ID":"6953b269-d306-4f7a-900e-1334d18333cf","Type":"ContainerDied","Data":"8c08cc9d125173789140735082cc16ff64bd07578af532a1c351f97e32e4f8ab"} Nov 26 08:43:42 crc kubenswrapper[4940]: I1126 08:43:42.078729 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" event={"ID":"6953b269-d306-4f7a-900e-1334d18333cf","Type":"ContainerStarted","Data":"fb571618d5f551ba8983509ad5b3a31372c59ea0e542de8d3c33acf107410f0c"} Nov 26 08:43:42 crc kubenswrapper[4940]: I1126 08:43:42.133819 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66f4b75c59-qv6wd"] Nov 26 08:43:42 crc kubenswrapper[4940]: I1126 08:43:42.204554 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66f4b75c59-qv6wd"] Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.022274 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.023694 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.026291 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.033198 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.090088 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" event={"ID":"6953b269-d306-4f7a-900e-1334d18333cf","Type":"ContainerStarted","Data":"eb7550f7b70c05723ae87d276add4794a5720dda1627da2fa4b6ed88677975d3"} Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.090243 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.114917 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" podStartSLOduration=3.114900717 podStartE2EDuration="3.114900717s" podCreationTimestamp="2025-11-26 08:43:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:43:43.108144621 +0000 UTC m=+6524.628286240" watchObservedRunningTime="2025-11-26 08:43:43.114900717 +0000 UTC m=+6524.635042336" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.137367 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/53b10b93-4376-4a88-9ab0-98ea88881ded-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.137459 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7d78e14c-ea9c-436a-894a-313587835522\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.138163 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j2sz\" (UniqueName: \"kubernetes.io/projected/53b10b93-4376-4a88-9ab0-98ea88881ded-kube-api-access-5j2sz\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.175459 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d625daa6-76f9-4127-ac90-8db26d82244a" path="/var/lib/kubelet/pods/d625daa6-76f9-4127-ac90-8db26d82244a/volumes" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.240042 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j2sz\" (UniqueName: \"kubernetes.io/projected/53b10b93-4376-4a88-9ab0-98ea88881ded-kube-api-access-5j2sz\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.240168 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/53b10b93-4376-4a88-9ab0-98ea88881ded-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") 
" pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.240248 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7d78e14c-ea9c-436a-894a-313587835522\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.243868 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.244140 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7d78e14c-ea9c-436a-894a-313587835522\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b49e6c751afd7e1a7ea5c19107140eed046dc435c64c1f283fb93d308b692875/globalmount\"" pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.244982 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/53b10b93-4376-4a88-9ab0-98ea88881ded-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.255619 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j2sz\" (UniqueName: \"kubernetes.io/projected/53b10b93-4376-4a88-9ab0-98ea88881ded-kube-api-access-5j2sz\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.273630 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7d78e14c-ea9c-436a-894a-313587835522\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522\") pod \"ovn-copy-data\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.360239 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 26 08:43:43 crc kubenswrapper[4940]: I1126 08:43:43.820254 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 08:43:43 crc kubenswrapper[4940]: W1126 08:43:43.822747 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53b10b93_4376_4a88_9ab0_98ea88881ded.slice/crio-0627062bf4d1baf7fdeb446dac66ee6c72aa50e721d1836f6b65452a50241eed WatchSource:0}: Error finding container 0627062bf4d1baf7fdeb446dac66ee6c72aa50e721d1836f6b65452a50241eed: Status 404 returned error can't find the container with id 0627062bf4d1baf7fdeb446dac66ee6c72aa50e721d1836f6b65452a50241eed Nov 26 08:43:44 crc kubenswrapper[4940]: I1126 08:43:44.099734 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"53b10b93-4376-4a88-9ab0-98ea88881ded","Type":"ContainerStarted","Data":"0627062bf4d1baf7fdeb446dac66ee6c72aa50e721d1836f6b65452a50241eed"} Nov 26 08:43:46 crc kubenswrapper[4940]: I1126 08:43:46.114909 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"53b10b93-4376-4a88-9ab0-98ea88881ded","Type":"ContainerStarted","Data":"48ed3f1fe397c68af4339668a277462bc16a299dbb64ff03b086bd88f1f92712"} Nov 26 08:43:46 crc kubenswrapper[4940]: I1126 08:43:46.137926 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.445347451 podStartE2EDuration="5.137902468s" podCreationTimestamp="2025-11-26 08:43:41 +0000 UTC" firstStartedPulling="2025-11-26 08:43:43.825591536 +0000 UTC m=+6525.345733155" lastFinishedPulling="2025-11-26 08:43:45.518146513 +0000 UTC m=+6527.038288172" observedRunningTime="2025-11-26 08:43:46.130693339 +0000 UTC m=+6527.650834958" watchObservedRunningTime="2025-11-26 08:43:46.137902468 +0000 UTC m=+6527.658044087" Nov 26 08:43:50 crc kubenswrapper[4940]: I1126 08:43:50.890450 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:43:50 crc kubenswrapper[4940]: I1126 08:43:50.987397 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fc86c8ff7-cfdk6"] Nov 26 08:43:50 crc kubenswrapper[4940]: I1126 08:43:50.987826 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" podUID="0624efda-c161-496f-9bbb-8519e74d058d" containerName="dnsmasq-dns" containerID="cri-o://9a923e1676d6f2cc68099d52bf3e2b97e4464e20870896b82f86625fbb151cea" gracePeriod=10 Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.171978 4940 generic.go:334] "Generic (PLEG): container finished" podID="0624efda-c161-496f-9bbb-8519e74d058d" containerID="9a923e1676d6f2cc68099d52bf3e2b97e4464e20870896b82f86625fbb151cea" exitCode=0 Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.180399 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" event={"ID":"0624efda-c161-496f-9bbb-8519e74d058d","Type":"ContainerDied","Data":"9a923e1676d6f2cc68099d52bf3e2b97e4464e20870896b82f86625fbb151cea"} Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.549130 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.683763 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-dns-svc\") pod \"0624efda-c161-496f-9bbb-8519e74d058d\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.683970 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-config\") pod \"0624efda-c161-496f-9bbb-8519e74d058d\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.684012 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x76l4\" (UniqueName: \"kubernetes.io/projected/0624efda-c161-496f-9bbb-8519e74d058d-kube-api-access-x76l4\") pod \"0624efda-c161-496f-9bbb-8519e74d058d\" (UID: \"0624efda-c161-496f-9bbb-8519e74d058d\") " Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.690250 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0624efda-c161-496f-9bbb-8519e74d058d-kube-api-access-x76l4" (OuterVolumeSpecName: "kube-api-access-x76l4") pod "0624efda-c161-496f-9bbb-8519e74d058d" (UID: "0624efda-c161-496f-9bbb-8519e74d058d"). InnerVolumeSpecName "kube-api-access-x76l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.720027 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0624efda-c161-496f-9bbb-8519e74d058d" (UID: "0624efda-c161-496f-9bbb-8519e74d058d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.725822 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-config" (OuterVolumeSpecName: "config") pod "0624efda-c161-496f-9bbb-8519e74d058d" (UID: "0624efda-c161-496f-9bbb-8519e74d058d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.786700 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.786729 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0624efda-c161-496f-9bbb-8519e74d058d-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:51 crc kubenswrapper[4940]: I1126 08:43:51.786742 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x76l4\" (UniqueName: \"kubernetes.io/projected/0624efda-c161-496f-9bbb-8519e74d058d-kube-api-access-x76l4\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:52 crc kubenswrapper[4940]: I1126 08:43:52.191243 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" event={"ID":"0624efda-c161-496f-9bbb-8519e74d058d","Type":"ContainerDied","Data":"0dfa3e481af58ff24de99e16275c17b4d8401ff77a0c42c31db7ec57f3e40f48"} Nov 26 08:43:52 crc kubenswrapper[4940]: I1126 08:43:52.191316 4940 scope.go:117] "RemoveContainer" containerID="9a923e1676d6f2cc68099d52bf3e2b97e4464e20870896b82f86625fbb151cea" Nov 26 08:43:52 crc kubenswrapper[4940]: I1126 08:43:52.191385 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" Nov 26 08:43:52 crc kubenswrapper[4940]: I1126 08:43:52.226313 4940 scope.go:117] "RemoveContainer" containerID="9840f5e1bb19e9222e69be48e81b621c2cdc8ec3c4ec3d67a29caf795d6e0f26" Nov 26 08:43:52 crc kubenswrapper[4940]: I1126 08:43:52.248521 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fc86c8ff7-cfdk6"] Nov 26 08:43:52 crc kubenswrapper[4940]: I1126 08:43:52.258238 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fc86c8ff7-cfdk6"] Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.176102 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0624efda-c161-496f-9bbb-8519e74d058d" path="/var/lib/kubelet/pods/0624efda-c161-496f-9bbb-8519e74d058d/volumes" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.551236 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 26 08:43:53 crc kubenswrapper[4940]: E1126 08:43:53.552329 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0624efda-c161-496f-9bbb-8519e74d058d" containerName="dnsmasq-dns" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.552358 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0624efda-c161-496f-9bbb-8519e74d058d" containerName="dnsmasq-dns" Nov 26 08:43:53 crc kubenswrapper[4940]: E1126 08:43:53.552383 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0624efda-c161-496f-9bbb-8519e74d058d" containerName="init" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.552392 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0624efda-c161-496f-9bbb-8519e74d058d" containerName="init" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.552643 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0624efda-c161-496f-9bbb-8519e74d058d" containerName="dnsmasq-dns" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.554085 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.556396 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.557622 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.557923 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-8c52w" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.570599 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.716084 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-scripts\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.716146 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hgc7\" (UniqueName: \"kubernetes.io/projected/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-kube-api-access-5hgc7\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.716234 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-config\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.716358 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.716469 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.818289 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.818404 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-scripts\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.818439 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hgc7\" (UniqueName: \"kubernetes.io/projected/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-kube-api-access-5hgc7\") pod 
\"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.818469 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-config\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.818870 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.818950 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.819395 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-scripts\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.819541 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-config\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.823017 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.833827 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hgc7\" (UniqueName: \"kubernetes.io/projected/1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1-kube-api-access-5hgc7\") pod \"ovn-northd-0\" (UID: \"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1\") " pod="openstack/ovn-northd-0" Nov 26 08:43:53 crc kubenswrapper[4940]: I1126 08:43:53.875929 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 08:43:54 crc kubenswrapper[4940]: I1126 08:43:54.304582 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 08:43:54 crc kubenswrapper[4940]: W1126 08:43:54.311233 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1fc4c1a4_2eb4_4aaf_a767_2894a8f8f0d1.slice/crio-f18df159f13991b727d5c633f403914164b3e3856f3853e0edfc07158f0f8456 WatchSource:0}: Error finding container f18df159f13991b727d5c633f403914164b3e3856f3853e0edfc07158f0f8456: Status 404 returned error can't find the container with id f18df159f13991b727d5c633f403914164b3e3856f3853e0edfc07158f0f8456 Nov 26 08:43:55 crc kubenswrapper[4940]: I1126 08:43:55.227297 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1","Type":"ContainerStarted","Data":"c802766d9fe4f7fea39e8bae9ec10f14068c7aac5a0d230fdcdbab798bb742f8"} Nov 26 08:43:55 crc kubenswrapper[4940]: I1126 08:43:55.227659 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1","Type":"ContainerStarted","Data":"f18df159f13991b727d5c633f403914164b3e3856f3853e0edfc07158f0f8456"} Nov 26 08:43:56 crc kubenswrapper[4940]: I1126 08:43:56.240573 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1","Type":"ContainerStarted","Data":"b0879a76c67c507226d0d2fd85d522665f40f6e9ccfe3b3fe61e16017d09bcc9"} Nov 26 08:43:56 crc kubenswrapper[4940]: I1126 08:43:56.241810 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 26 08:43:56 crc kubenswrapper[4940]: I1126 08:43:56.264672 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.638902827 podStartE2EDuration="3.264651424s" podCreationTimestamp="2025-11-26 08:43:53 +0000 UTC" firstStartedPulling="2025-11-26 08:43:54.314485153 +0000 UTC m=+6535.834626772" lastFinishedPulling="2025-11-26 08:43:54.94023375 +0000 UTC m=+6536.460375369" observedRunningTime="2025-11-26 08:43:56.258439725 +0000 UTC m=+6537.778581384" watchObservedRunningTime="2025-11-26 08:43:56.264651424 +0000 UTC m=+6537.784793053" Nov 26 08:43:56 crc kubenswrapper[4940]: I1126 08:43:56.444250 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5fc86c8ff7-cfdk6" podUID="0624efda-c161-496f-9bbb-8519e74d058d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.26:5353: i/o timeout" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.006738 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cd18-account-create-update-n4wgd"] Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.008321 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.010470 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.017658 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-5zt9x"] Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.018915 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.033992 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63e84272-622c-49b0-a1d5-d13bce734c64-operator-scripts\") pod \"keystone-db-create-5zt9x\" (UID: \"63e84272-622c-49b0-a1d5-d13bce734c64\") " pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.034154 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pn765\" (UniqueName: \"kubernetes.io/projected/63e84272-622c-49b0-a1d5-d13bce734c64-kube-api-access-pn765\") pod \"keystone-db-create-5zt9x\" (UID: \"63e84272-622c-49b0-a1d5-d13bce734c64\") " pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.034538 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cd18-account-create-update-n4wgd"] Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.046193 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-5zt9x"] Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.136260 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63e84272-622c-49b0-a1d5-d13bce734c64-operator-scripts\") pod \"keystone-db-create-5zt9x\" (UID: \"63e84272-622c-49b0-a1d5-d13bce734c64\") " pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.136428 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9e91a83-324a-4101-b142-bdb57ed475ac-operator-scripts\") pod \"keystone-cd18-account-create-update-n4wgd\" (UID: \"f9e91a83-324a-4101-b142-bdb57ed475ac\") " pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.136504 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pn765\" (UniqueName: \"kubernetes.io/projected/63e84272-622c-49b0-a1d5-d13bce734c64-kube-api-access-pn765\") pod \"keystone-db-create-5zt9x\" (UID: \"63e84272-622c-49b0-a1d5-d13bce734c64\") " pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.136711 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf4bv\" (UniqueName: \"kubernetes.io/projected/f9e91a83-324a-4101-b142-bdb57ed475ac-kube-api-access-cf4bv\") pod \"keystone-cd18-account-create-update-n4wgd\" (UID: \"f9e91a83-324a-4101-b142-bdb57ed475ac\") " pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.137182 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63e84272-622c-49b0-a1d5-d13bce734c64-operator-scripts\") pod \"keystone-db-create-5zt9x\" (UID: \"63e84272-622c-49b0-a1d5-d13bce734c64\") " pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.157967 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pn765\" (UniqueName: \"kubernetes.io/projected/63e84272-622c-49b0-a1d5-d13bce734c64-kube-api-access-pn765\") pod 
\"keystone-db-create-5zt9x\" (UID: \"63e84272-622c-49b0-a1d5-d13bce734c64\") " pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.237965 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf4bv\" (UniqueName: \"kubernetes.io/projected/f9e91a83-324a-4101-b142-bdb57ed475ac-kube-api-access-cf4bv\") pod \"keystone-cd18-account-create-update-n4wgd\" (UID: \"f9e91a83-324a-4101-b142-bdb57ed475ac\") " pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.238100 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9e91a83-324a-4101-b142-bdb57ed475ac-operator-scripts\") pod \"keystone-cd18-account-create-update-n4wgd\" (UID: \"f9e91a83-324a-4101-b142-bdb57ed475ac\") " pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.239994 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9e91a83-324a-4101-b142-bdb57ed475ac-operator-scripts\") pod \"keystone-cd18-account-create-update-n4wgd\" (UID: \"f9e91a83-324a-4101-b142-bdb57ed475ac\") " pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.255030 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf4bv\" (UniqueName: \"kubernetes.io/projected/f9e91a83-324a-4101-b142-bdb57ed475ac-kube-api-access-cf4bv\") pod \"keystone-cd18-account-create-update-n4wgd\" (UID: \"f9e91a83-324a-4101-b142-bdb57ed475ac\") " pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.343308 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.359366 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.689208 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-5zt9x"] Nov 26 08:44:01 crc kubenswrapper[4940]: W1126 08:44:01.689997 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63e84272_622c_49b0_a1d5_d13bce734c64.slice/crio-9c67079d01906f207c7a552663bd13f0fab49b833c5963324cf476616f89c6c0 WatchSource:0}: Error finding container 9c67079d01906f207c7a552663bd13f0fab49b833c5963324cf476616f89c6c0: Status 404 returned error can't find the container with id 9c67079d01906f207c7a552663bd13f0fab49b833c5963324cf476616f89c6c0 Nov 26 08:44:01 crc kubenswrapper[4940]: W1126 08:44:01.792238 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9e91a83_324a_4101_b142_bdb57ed475ac.slice/crio-b128b5e50f2adf282501804ffb4aa7cd295a934980270f0b8c0b09b834cdf6f1 WatchSource:0}: Error finding container b128b5e50f2adf282501804ffb4aa7cd295a934980270f0b8c0b09b834cdf6f1: Status 404 returned error can't find the container with id b128b5e50f2adf282501804ffb4aa7cd295a934980270f0b8c0b09b834cdf6f1 Nov 26 08:44:01 crc kubenswrapper[4940]: I1126 08:44:01.793734 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cd18-account-create-update-n4wgd"] Nov 26 08:44:02 crc kubenswrapper[4940]: I1126 08:44:02.294028 4940 generic.go:334] "Generic (PLEG): container finished" podID="63e84272-622c-49b0-a1d5-d13bce734c64" containerID="591f2f4ef6e508a270b5d8f0b185cdd353717be5a56703cf615159d43dd0b1d4" exitCode=0 Nov 26 08:44:02 crc kubenswrapper[4940]: I1126 08:44:02.294174 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5zt9x" event={"ID":"63e84272-622c-49b0-a1d5-d13bce734c64","Type":"ContainerDied","Data":"591f2f4ef6e508a270b5d8f0b185cdd353717be5a56703cf615159d43dd0b1d4"} Nov 26 08:44:02 crc kubenswrapper[4940]: I1126 08:44:02.294430 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5zt9x" event={"ID":"63e84272-622c-49b0-a1d5-d13bce734c64","Type":"ContainerStarted","Data":"9c67079d01906f207c7a552663bd13f0fab49b833c5963324cf476616f89c6c0"} Nov 26 08:44:02 crc kubenswrapper[4940]: I1126 08:44:02.296373 4940 generic.go:334] "Generic (PLEG): container finished" podID="f9e91a83-324a-4101-b142-bdb57ed475ac" containerID="ab89f4f6e8a00976895dc06bfa33d886f0516f81e08b7e39d88ddcf38e211237" exitCode=0 Nov 26 08:44:02 crc kubenswrapper[4940]: I1126 08:44:02.296427 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cd18-account-create-update-n4wgd" event={"ID":"f9e91a83-324a-4101-b142-bdb57ed475ac","Type":"ContainerDied","Data":"ab89f4f6e8a00976895dc06bfa33d886f0516f81e08b7e39d88ddcf38e211237"} Nov 26 08:44:02 crc kubenswrapper[4940]: I1126 08:44:02.296500 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cd18-account-create-update-n4wgd" event={"ID":"f9e91a83-324a-4101-b142-bdb57ed475ac","Type":"ContainerStarted","Data":"b128b5e50f2adf282501804ffb4aa7cd295a934980270f0b8c0b09b834cdf6f1"} Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.754578 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.766574 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.890359 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9e91a83-324a-4101-b142-bdb57ed475ac-operator-scripts\") pod \"f9e91a83-324a-4101-b142-bdb57ed475ac\" (UID: \"f9e91a83-324a-4101-b142-bdb57ed475ac\") " Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.890444 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pn765\" (UniqueName: \"kubernetes.io/projected/63e84272-622c-49b0-a1d5-d13bce734c64-kube-api-access-pn765\") pod \"63e84272-622c-49b0-a1d5-d13bce734c64\" (UID: \"63e84272-622c-49b0-a1d5-d13bce734c64\") " Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.890469 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63e84272-622c-49b0-a1d5-d13bce734c64-operator-scripts\") pod \"63e84272-622c-49b0-a1d5-d13bce734c64\" (UID: \"63e84272-622c-49b0-a1d5-d13bce734c64\") " Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.890556 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cf4bv\" (UniqueName: \"kubernetes.io/projected/f9e91a83-324a-4101-b142-bdb57ed475ac-kube-api-access-cf4bv\") pod \"f9e91a83-324a-4101-b142-bdb57ed475ac\" (UID: \"f9e91a83-324a-4101-b142-bdb57ed475ac\") " Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.891020 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9e91a83-324a-4101-b142-bdb57ed475ac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f9e91a83-324a-4101-b142-bdb57ed475ac" (UID: "f9e91a83-324a-4101-b142-bdb57ed475ac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.891199 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63e84272-622c-49b0-a1d5-d13bce734c64-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "63e84272-622c-49b0-a1d5-d13bce734c64" (UID: "63e84272-622c-49b0-a1d5-d13bce734c64"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.896500 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63e84272-622c-49b0-a1d5-d13bce734c64-kube-api-access-pn765" (OuterVolumeSpecName: "kube-api-access-pn765") pod "63e84272-622c-49b0-a1d5-d13bce734c64" (UID: "63e84272-622c-49b0-a1d5-d13bce734c64"). InnerVolumeSpecName "kube-api-access-pn765". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.897467 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9e91a83-324a-4101-b142-bdb57ed475ac-kube-api-access-cf4bv" (OuterVolumeSpecName: "kube-api-access-cf4bv") pod "f9e91a83-324a-4101-b142-bdb57ed475ac" (UID: "f9e91a83-324a-4101-b142-bdb57ed475ac"). InnerVolumeSpecName "kube-api-access-cf4bv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.992596 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cf4bv\" (UniqueName: \"kubernetes.io/projected/f9e91a83-324a-4101-b142-bdb57ed475ac-kube-api-access-cf4bv\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.992650 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9e91a83-324a-4101-b142-bdb57ed475ac-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.992669 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pn765\" (UniqueName: \"kubernetes.io/projected/63e84272-622c-49b0-a1d5-d13bce734c64-kube-api-access-pn765\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:03 crc kubenswrapper[4940]: I1126 08:44:03.992687 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/63e84272-622c-49b0-a1d5-d13bce734c64-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:04 crc kubenswrapper[4940]: I1126 08:44:04.322485 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5zt9x" event={"ID":"63e84272-622c-49b0-a1d5-d13bce734c64","Type":"ContainerDied","Data":"9c67079d01906f207c7a552663bd13f0fab49b833c5963324cf476616f89c6c0"} Nov 26 08:44:04 crc kubenswrapper[4940]: I1126 08:44:04.322541 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c67079d01906f207c7a552663bd13f0fab49b833c5963324cf476616f89c6c0" Nov 26 08:44:04 crc kubenswrapper[4940]: I1126 08:44:04.322543 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-5zt9x" Nov 26 08:44:04 crc kubenswrapper[4940]: I1126 08:44:04.326000 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cd18-account-create-update-n4wgd" Nov 26 08:44:04 crc kubenswrapper[4940]: I1126 08:44:04.325989 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cd18-account-create-update-n4wgd" event={"ID":"f9e91a83-324a-4101-b142-bdb57ed475ac","Type":"ContainerDied","Data":"b128b5e50f2adf282501804ffb4aa7cd295a934980270f0b8c0b09b834cdf6f1"} Nov 26 08:44:04 crc kubenswrapper[4940]: I1126 08:44:04.326131 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b128b5e50f2adf282501804ffb4aa7cd295a934980270f0b8c0b09b834cdf6f1" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.386560 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-5lxt4"] Nov 26 08:44:06 crc kubenswrapper[4940]: E1126 08:44:06.388202 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9e91a83-324a-4101-b142-bdb57ed475ac" containerName="mariadb-account-create-update" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.388362 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9e91a83-324a-4101-b142-bdb57ed475ac" containerName="mariadb-account-create-update" Nov 26 08:44:06 crc kubenswrapper[4940]: E1126 08:44:06.388501 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63e84272-622c-49b0-a1d5-d13bce734c64" containerName="mariadb-database-create" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.388599 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="63e84272-622c-49b0-a1d5-d13bce734c64" containerName="mariadb-database-create" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.394445 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9e91a83-324a-4101-b142-bdb57ed475ac" containerName="mariadb-account-create-update" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.394720 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="63e84272-622c-49b0-a1d5-d13bce734c64" containerName="mariadb-database-create" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.395676 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.405398 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-5lxt4"] Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.406704 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.407428 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.407745 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.408094 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hm8sh" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.444750 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4qjg\" (UniqueName: \"kubernetes.io/projected/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-kube-api-access-q4qjg\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.444888 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-config-data\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.444987 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-combined-ca-bundle\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.546343 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4qjg\" (UniqueName: \"kubernetes.io/projected/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-kube-api-access-q4qjg\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.546808 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-config-data\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.546924 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-combined-ca-bundle\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.555611 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-combined-ca-bundle\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " 
pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.556197 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-config-data\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.565566 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4qjg\" (UniqueName: \"kubernetes.io/projected/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-kube-api-access-q4qjg\") pod \"keystone-db-sync-5lxt4\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:06 crc kubenswrapper[4940]: I1126 08:44:06.750005 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:07 crc kubenswrapper[4940]: I1126 08:44:07.177872 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-5lxt4"] Nov 26 08:44:07 crc kubenswrapper[4940]: I1126 08:44:07.354488 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-5lxt4" event={"ID":"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3","Type":"ContainerStarted","Data":"75d9f308d1bbb9e728b1686c0851aa4f95faa144439e1e82602025cded8a6ac4"} Nov 26 08:44:08 crc kubenswrapper[4940]: I1126 08:44:08.930132 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 26 08:44:12 crc kubenswrapper[4940]: I1126 08:44:12.410514 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-5lxt4" event={"ID":"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3","Type":"ContainerStarted","Data":"98aba1818861fa16b1e7c591993d05df210a7668e1b1be3633c1d1bd43bb3db9"} Nov 26 08:44:12 crc kubenswrapper[4940]: I1126 08:44:12.435796 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-5lxt4" podStartSLOduration=1.97413288 podStartE2EDuration="6.435776982s" podCreationTimestamp="2025-11-26 08:44:06 +0000 UTC" firstStartedPulling="2025-11-26 08:44:07.178205519 +0000 UTC m=+6548.698347138" lastFinishedPulling="2025-11-26 08:44:11.639849621 +0000 UTC m=+6553.159991240" observedRunningTime="2025-11-26 08:44:12.427831809 +0000 UTC m=+6553.947973448" watchObservedRunningTime="2025-11-26 08:44:12.435776982 +0000 UTC m=+6553.955918601" Nov 26 08:44:14 crc kubenswrapper[4940]: I1126 08:44:14.438107 4940 generic.go:334] "Generic (PLEG): container finished" podID="7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3" containerID="98aba1818861fa16b1e7c591993d05df210a7668e1b1be3633c1d1bd43bb3db9" exitCode=0 Nov 26 08:44:14 crc kubenswrapper[4940]: I1126 08:44:14.438257 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-5lxt4" event={"ID":"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3","Type":"ContainerDied","Data":"98aba1818861fa16b1e7c591993d05df210a7668e1b1be3633c1d1bd43bb3db9"} Nov 26 08:44:15 crc kubenswrapper[4940]: I1126 08:44:15.776089 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:15 crc kubenswrapper[4940]: I1126 08:44:15.904039 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-config-data\") pod \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " Nov 26 08:44:15 crc kubenswrapper[4940]: I1126 08:44:15.904239 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4qjg\" (UniqueName: \"kubernetes.io/projected/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-kube-api-access-q4qjg\") pod \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " Nov 26 08:44:15 crc kubenswrapper[4940]: I1126 08:44:15.904303 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-combined-ca-bundle\") pod \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\" (UID: \"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3\") " Nov 26 08:44:15 crc kubenswrapper[4940]: I1126 08:44:15.909635 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-kube-api-access-q4qjg" (OuterVolumeSpecName: "kube-api-access-q4qjg") pod "7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3" (UID: "7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3"). InnerVolumeSpecName "kube-api-access-q4qjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:44:15 crc kubenswrapper[4940]: I1126 08:44:15.932595 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3" (UID: "7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:15 crc kubenswrapper[4940]: I1126 08:44:15.956689 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-config-data" (OuterVolumeSpecName: "config-data") pod "7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3" (UID: "7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.005737 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.005784 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.005797 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4qjg\" (UniqueName: \"kubernetes.io/projected/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3-kube-api-access-q4qjg\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.463704 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-5lxt4" event={"ID":"7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3","Type":"ContainerDied","Data":"75d9f308d1bbb9e728b1686c0851aa4f95faa144439e1e82602025cded8a6ac4"} Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.463751 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75d9f308d1bbb9e728b1686c0851aa4f95faa144439e1e82602025cded8a6ac4" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.463966 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-5lxt4" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.745776 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59c754c49f-q4t8x"] Nov 26 08:44:16 crc kubenswrapper[4940]: E1126 08:44:16.746337 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3" containerName="keystone-db-sync" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.746354 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3" containerName="keystone-db-sync" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.746531 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3" containerName="keystone-db-sync" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.747382 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.771683 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59c754c49f-q4t8x"] Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.796882 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-29t9s"] Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.799970 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.802364 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.802400 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.802820 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.802943 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.803411 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hm8sh" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.812769 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-29t9s"] Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.922680 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-nb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.922746 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-dns-svc\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.922830 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-config-data\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.922966 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-sb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.923029 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-config\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.923059 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-credential-keys\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.923081 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-combined-ca-bundle\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.923169 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-scripts\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.923222 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tdsb\" (UniqueName: \"kubernetes.io/projected/757dd3ba-00d6-4226-bfc7-f3e18531acd0-kube-api-access-9tdsb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.923243 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8n5j\" (UniqueName: \"kubernetes.io/projected/34e10231-a0d5-49ef-88a0-9716e73c0e7b-kube-api-access-l8n5j\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:16 crc kubenswrapper[4940]: I1126 08:44:16.923270 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-fernet-keys\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025082 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-nb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025139 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-dns-svc\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025161 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-config-data\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025192 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-sb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025219 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-config\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025237 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-credential-keys\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025257 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-combined-ca-bundle\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025295 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-scripts\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025322 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8n5j\" (UniqueName: \"kubernetes.io/projected/34e10231-a0d5-49ef-88a0-9716e73c0e7b-kube-api-access-l8n5j\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025339 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tdsb\" (UniqueName: \"kubernetes.io/projected/757dd3ba-00d6-4226-bfc7-f3e18531acd0-kube-api-access-9tdsb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.025357 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-fernet-keys\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.026316 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-sb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.026361 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-dns-svc\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.026361 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-nb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.028334 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-config\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.030961 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-fernet-keys\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.034750 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-scripts\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.035733 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-config-data\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.037145 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-combined-ca-bundle\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.040438 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-credential-keys\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.048962 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8n5j\" (UniqueName: \"kubernetes.io/projected/34e10231-a0d5-49ef-88a0-9716e73c0e7b-kube-api-access-l8n5j\") pod \"keystone-bootstrap-29t9s\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.061946 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tdsb\" (UniqueName: \"kubernetes.io/projected/757dd3ba-00d6-4226-bfc7-f3e18531acd0-kube-api-access-9tdsb\") pod \"dnsmasq-dns-59c754c49f-q4t8x\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.064900 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.124489 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.550355 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59c754c49f-q4t8x"] Nov 26 08:44:17 crc kubenswrapper[4940]: W1126 08:44:17.551675 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod757dd3ba_00d6_4226_bfc7_f3e18531acd0.slice/crio-91c11a6fae5cc122e844a53305773e83e6b26ab99be504bc83ff5c66893e7113 WatchSource:0}: Error finding container 91c11a6fae5cc122e844a53305773e83e6b26ab99be504bc83ff5c66893e7113: Status 404 returned error can't find the container with id 91c11a6fae5cc122e844a53305773e83e6b26ab99be504bc83ff5c66893e7113 Nov 26 08:44:17 crc kubenswrapper[4940]: I1126 08:44:17.632109 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-29t9s"] Nov 26 08:44:17 crc kubenswrapper[4940]: W1126 08:44:17.638874 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34e10231_a0d5_49ef_88a0_9716e73c0e7b.slice/crio-618bfc73a0ceb889e165a4e3a0f36da2535d277931616a9a42f11dc2bcc1605f WatchSource:0}: Error finding container 618bfc73a0ceb889e165a4e3a0f36da2535d277931616a9a42f11dc2bcc1605f: Status 404 returned error can't find the container with id 618bfc73a0ceb889e165a4e3a0f36da2535d277931616a9a42f11dc2bcc1605f Nov 26 08:44:18 crc kubenswrapper[4940]: I1126 08:44:18.485627 4940 generic.go:334] "Generic (PLEG): container finished" podID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" containerID="75a7e01d4e289311916ae224710632aedbd489f857a84d7cf7f01091affb6630" exitCode=0 Nov 26 08:44:18 crc kubenswrapper[4940]: I1126 08:44:18.485768 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" event={"ID":"757dd3ba-00d6-4226-bfc7-f3e18531acd0","Type":"ContainerDied","Data":"75a7e01d4e289311916ae224710632aedbd489f857a84d7cf7f01091affb6630"} Nov 26 08:44:18 crc kubenswrapper[4940]: I1126 08:44:18.488859 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" event={"ID":"757dd3ba-00d6-4226-bfc7-f3e18531acd0","Type":"ContainerStarted","Data":"91c11a6fae5cc122e844a53305773e83e6b26ab99be504bc83ff5c66893e7113"} Nov 26 08:44:18 crc kubenswrapper[4940]: I1126 08:44:18.490123 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-29t9s" event={"ID":"34e10231-a0d5-49ef-88a0-9716e73c0e7b","Type":"ContainerStarted","Data":"2c42738d0842e326477ab3aa88798d8dca2300e3a76104cfad051d8a0bb6421d"} Nov 26 08:44:18 crc kubenswrapper[4940]: I1126 08:44:18.490177 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-29t9s" event={"ID":"34e10231-a0d5-49ef-88a0-9716e73c0e7b","Type":"ContainerStarted","Data":"618bfc73a0ceb889e165a4e3a0f36da2535d277931616a9a42f11dc2bcc1605f"} Nov 26 08:44:18 crc kubenswrapper[4940]: I1126 08:44:18.566157 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-29t9s" podStartSLOduration=2.566133698 podStartE2EDuration="2.566133698s" podCreationTimestamp="2025-11-26 08:44:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:44:18.564448765 +0000 UTC m=+6560.084590414" watchObservedRunningTime="2025-11-26 08:44:18.566133698 +0000 UTC m=+6560.086275337" Nov 26 
Nov 26 08:44:19 crc kubenswrapper[4940]: I1126 08:44:19.529169 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" podStartSLOduration=3.529145063 podStartE2EDuration="3.529145063s" podCreationTimestamp="2025-11-26 08:44:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:44:19.521084486 +0000 UTC m=+6561.041226115" watchObservedRunningTime="2025-11-26 08:44:19.529145063 +0000 UTC m=+6561.049286692"
Nov 26 08:44:20 crc kubenswrapper[4940]: I1126 08:44:20.511941 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x"
Nov 26 08:44:21 crc kubenswrapper[4940]: I1126 08:44:21.527550 4940 generic.go:334] "Generic (PLEG): container finished" podID="34e10231-a0d5-49ef-88a0-9716e73c0e7b" containerID="2c42738d0842e326477ab3aa88798d8dca2300e3a76104cfad051d8a0bb6421d" exitCode=0
Nov 26 08:44:21 crc kubenswrapper[4940]: I1126 08:44:21.527740 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-29t9s" event={"ID":"34e10231-a0d5-49ef-88a0-9716e73c0e7b","Type":"ContainerDied","Data":"2c42738d0842e326477ab3aa88798d8dca2300e3a76104cfad051d8a0bb6421d"}
Nov 26 08:44:22 crc kubenswrapper[4940]: I1126 08:44:22.919310 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-29t9s"
Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.032130 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-scripts\") pod \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") "
Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.032240 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8n5j\" (UniqueName: \"kubernetes.io/projected/34e10231-a0d5-49ef-88a0-9716e73c0e7b-kube-api-access-l8n5j\") pod \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") "
Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.032279 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-config-data\") pod \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") "
Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.032383 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-fernet-keys\") pod \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") "
Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.032412 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-combined-ca-bundle\") pod \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") "
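[Editor's note] The prober entries in this log (the 08:44:20 readiness probe here, and the earlier "dial tcp 10.217.1.26:5353: i/o timeout" failure against dnsmasq-dns) come down to a timed TCP dial. A minimal Go sketch in the same spirit; the address is copied from the earlier failure, while the one-second timeout is an assumption rather than the pod's actual probe settings:

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // Dial the probed address with a deadline, as a TCP readiness check would.
        conn, err := net.DialTimeout("tcp", "10.217.1.26:5353", time.Second)
        if err != nil {
            fmt.Println("probe failed:", err) // e.g. "dial tcp 10.217.1.26:5353: i/o timeout"
            return
        }
        conn.Close()
        fmt.Println("probe succeeded")
    }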
\"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.032438 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-credential-keys\") pod \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\" (UID: \"34e10231-a0d5-49ef-88a0-9716e73c0e7b\") " Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.051709 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "34e10231-a0d5-49ef-88a0-9716e73c0e7b" (UID: "34e10231-a0d5-49ef-88a0-9716e73c0e7b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.051874 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34e10231-a0d5-49ef-88a0-9716e73c0e7b-kube-api-access-l8n5j" (OuterVolumeSpecName: "kube-api-access-l8n5j") pod "34e10231-a0d5-49ef-88a0-9716e73c0e7b" (UID: "34e10231-a0d5-49ef-88a0-9716e73c0e7b"). InnerVolumeSpecName "kube-api-access-l8n5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.051936 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-scripts" (OuterVolumeSpecName: "scripts") pod "34e10231-a0d5-49ef-88a0-9716e73c0e7b" (UID: "34e10231-a0d5-49ef-88a0-9716e73c0e7b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.054676 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "34e10231-a0d5-49ef-88a0-9716e73c0e7b" (UID: "34e10231-a0d5-49ef-88a0-9716e73c0e7b"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.080693 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34e10231-a0d5-49ef-88a0-9716e73c0e7b" (UID: "34e10231-a0d5-49ef-88a0-9716e73c0e7b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.083458 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-config-data" (OuterVolumeSpecName: "config-data") pod "34e10231-a0d5-49ef-88a0-9716e73c0e7b" (UID: "34e10231-a0d5-49ef-88a0-9716e73c0e7b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.135482 4940 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.135524 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.135538 4940 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.135550 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.135560 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8n5j\" (UniqueName: \"kubernetes.io/projected/34e10231-a0d5-49ef-88a0-9716e73c0e7b-kube-api-access-l8n5j\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.135571 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34e10231-a0d5-49ef-88a0-9716e73c0e7b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.551483 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-29t9s" event={"ID":"34e10231-a0d5-49ef-88a0-9716e73c0e7b","Type":"ContainerDied","Data":"618bfc73a0ceb889e165a4e3a0f36da2535d277931616a9a42f11dc2bcc1605f"} Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.551522 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="618bfc73a0ceb889e165a4e3a0f36da2535d277931616a9a42f11dc2bcc1605f" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.551617 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-29t9s" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.641124 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-29t9s"] Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.652749 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-29t9s"] Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.720092 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-t6hpq"] Nov 26 08:44:23 crc kubenswrapper[4940]: E1126 08:44:23.720481 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34e10231-a0d5-49ef-88a0-9716e73c0e7b" containerName="keystone-bootstrap" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.720503 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="34e10231-a0d5-49ef-88a0-9716e73c0e7b" containerName="keystone-bootstrap" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.720697 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="34e10231-a0d5-49ef-88a0-9716e73c0e7b" containerName="keystone-bootstrap" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.721404 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.723323 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.727941 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.728122 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.728170 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hm8sh" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.728603 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.737280 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-t6hpq"] Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.856443 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-scripts\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.856511 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-config-data\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.856583 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-fernet-keys\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.856627 4940 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-combined-ca-bundle\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.856668 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-credential-keys\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.856730 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p54c9\" (UniqueName: \"kubernetes.io/projected/924ac90c-c73d-4c72-b964-a25d7dece172-kube-api-access-p54c9\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.958554 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-fernet-keys\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.958632 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-combined-ca-bundle\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.958666 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-credential-keys\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.958726 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p54c9\" (UniqueName: \"kubernetes.io/projected/924ac90c-c73d-4c72-b964-a25d7dece172-kube-api-access-p54c9\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.958911 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-scripts\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.958943 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-config-data\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.962434 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-scripts\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.962588 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-fernet-keys\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.963101 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-config-data\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.963292 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-credential-keys\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.963863 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-combined-ca-bundle\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:23 crc kubenswrapper[4940]: I1126 08:44:23.974876 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p54c9\" (UniqueName: \"kubernetes.io/projected/924ac90c-c73d-4c72-b964-a25d7dece172-kube-api-access-p54c9\") pod \"keystone-bootstrap-t6hpq\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:24 crc kubenswrapper[4940]: I1126 08:44:24.042650 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:24 crc kubenswrapper[4940]: I1126 08:44:24.472809 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-t6hpq"] Nov 26 08:44:24 crc kubenswrapper[4940]: I1126 08:44:24.561370 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-t6hpq" event={"ID":"924ac90c-c73d-4c72-b964-a25d7dece172","Type":"ContainerStarted","Data":"b0c5bf1f7fdec61f20cb9f9eadf895ae7508fb9f64c9d8d84e3dd7de02f41efc"} Nov 26 08:44:25 crc kubenswrapper[4940]: I1126 08:44:25.182840 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34e10231-a0d5-49ef-88a0-9716e73c0e7b" path="/var/lib/kubelet/pods/34e10231-a0d5-49ef-88a0-9716e73c0e7b/volumes" Nov 26 08:44:25 crc kubenswrapper[4940]: I1126 08:44:25.575345 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-t6hpq" event={"ID":"924ac90c-c73d-4c72-b964-a25d7dece172","Type":"ContainerStarted","Data":"9a55afa30dc65eaf4ea89064161eaf2a9e23de2c8a5fa2d263a7d85761cb82cf"} Nov 26 08:44:25 crc kubenswrapper[4940]: I1126 08:44:25.612558 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-t6hpq" podStartSLOduration=2.612538831 podStartE2EDuration="2.612538831s" podCreationTimestamp="2025-11-26 08:44:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:44:25.599621239 +0000 UTC m=+6567.119762878" watchObservedRunningTime="2025-11-26 08:44:25.612538831 +0000 UTC m=+6567.132680460" Nov 26 08:44:27 crc kubenswrapper[4940]: I1126 08:44:27.067036 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:44:27 crc kubenswrapper[4940]: I1126 08:44:27.180576 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8447fd79d5-fss9b"] Nov 26 08:44:27 crc kubenswrapper[4940]: I1126 08:44:27.181155 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" podUID="6953b269-d306-4f7a-900e-1334d18333cf" containerName="dnsmasq-dns" containerID="cri-o://eb7550f7b70c05723ae87d276add4794a5720dda1627da2fa4b6ed88677975d3" gracePeriod=10 Nov 26 08:44:27 crc kubenswrapper[4940]: I1126 08:44:27.598070 4940 generic.go:334] "Generic (PLEG): container finished" podID="924ac90c-c73d-4c72-b964-a25d7dece172" containerID="9a55afa30dc65eaf4ea89064161eaf2a9e23de2c8a5fa2d263a7d85761cb82cf" exitCode=0 Nov 26 08:44:27 crc kubenswrapper[4940]: I1126 08:44:27.598120 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-t6hpq" event={"ID":"924ac90c-c73d-4c72-b964-a25d7dece172","Type":"ContainerDied","Data":"9a55afa30dc65eaf4ea89064161eaf2a9e23de2c8a5fa2d263a7d85761cb82cf"} Nov 26 08:44:27 crc kubenswrapper[4940]: I1126 08:44:27.601710 4940 generic.go:334] "Generic (PLEG): container finished" podID="6953b269-d306-4f7a-900e-1334d18333cf" containerID="eb7550f7b70c05723ae87d276add4794a5720dda1627da2fa4b6ed88677975d3" exitCode=0 Nov 26 08:44:27 crc kubenswrapper[4940]: I1126 08:44:27.601755 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" event={"ID":"6953b269-d306-4f7a-900e-1334d18333cf","Type":"ContainerDied","Data":"eb7550f7b70c05723ae87d276add4794a5720dda1627da2fa4b6ed88677975d3"} Nov 26 08:44:27 crc kubenswrapper[4940]: I1126 
08:44:27.853287 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.029900 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-config\") pod \"6953b269-d306-4f7a-900e-1334d18333cf\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.030257 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-nb\") pod \"6953b269-d306-4f7a-900e-1334d18333cf\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.030283 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-sb\") pod \"6953b269-d306-4f7a-900e-1334d18333cf\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.030301 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-dns-svc\") pod \"6953b269-d306-4f7a-900e-1334d18333cf\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.030369 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbn6r\" (UniqueName: \"kubernetes.io/projected/6953b269-d306-4f7a-900e-1334d18333cf-kube-api-access-qbn6r\") pod \"6953b269-d306-4f7a-900e-1334d18333cf\" (UID: \"6953b269-d306-4f7a-900e-1334d18333cf\") " Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.038260 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6953b269-d306-4f7a-900e-1334d18333cf-kube-api-access-qbn6r" (OuterVolumeSpecName: "kube-api-access-qbn6r") pod "6953b269-d306-4f7a-900e-1334d18333cf" (UID: "6953b269-d306-4f7a-900e-1334d18333cf"). InnerVolumeSpecName "kube-api-access-qbn6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.070742 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6953b269-d306-4f7a-900e-1334d18333cf" (UID: "6953b269-d306-4f7a-900e-1334d18333cf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.075810 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6953b269-d306-4f7a-900e-1334d18333cf" (UID: "6953b269-d306-4f7a-900e-1334d18333cf"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.077993 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6953b269-d306-4f7a-900e-1334d18333cf" (UID: "6953b269-d306-4f7a-900e-1334d18333cf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.086715 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-config" (OuterVolumeSpecName: "config") pod "6953b269-d306-4f7a-900e-1334d18333cf" (UID: "6953b269-d306-4f7a-900e-1334d18333cf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.132358 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.132584 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.132689 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.132763 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbn6r\" (UniqueName: \"kubernetes.io/projected/6953b269-d306-4f7a-900e-1334d18333cf-kube-api-access-qbn6r\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.132839 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6953b269-d306-4f7a-900e-1334d18333cf-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.616933 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.616997 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447fd79d5-fss9b" event={"ID":"6953b269-d306-4f7a-900e-1334d18333cf","Type":"ContainerDied","Data":"fb571618d5f551ba8983509ad5b3a31372c59ea0e542de8d3c33acf107410f0c"} Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.617131 4940 scope.go:117] "RemoveContainer" containerID="eb7550f7b70c05723ae87d276add4794a5720dda1627da2fa4b6ed88677975d3" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.652688 4940 scope.go:117] "RemoveContainer" containerID="8c08cc9d125173789140735082cc16ff64bd07578af532a1c351f97e32e4f8ab" Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.680833 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8447fd79d5-fss9b"] Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.689404 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8447fd79d5-fss9b"] Nov 26 08:44:28 crc kubenswrapper[4940]: I1126 08:44:28.963365 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.053352 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-credential-keys\") pod \"924ac90c-c73d-4c72-b964-a25d7dece172\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.053427 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-combined-ca-bundle\") pod \"924ac90c-c73d-4c72-b964-a25d7dece172\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.053473 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-fernet-keys\") pod \"924ac90c-c73d-4c72-b964-a25d7dece172\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.053524 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p54c9\" (UniqueName: \"kubernetes.io/projected/924ac90c-c73d-4c72-b964-a25d7dece172-kube-api-access-p54c9\") pod \"924ac90c-c73d-4c72-b964-a25d7dece172\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.053641 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-config-data\") pod \"924ac90c-c73d-4c72-b964-a25d7dece172\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.053678 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-scripts\") pod \"924ac90c-c73d-4c72-b964-a25d7dece172\" (UID: \"924ac90c-c73d-4c72-b964-a25d7dece172\") " Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.059253 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "924ac90c-c73d-4c72-b964-a25d7dece172" (UID: "924ac90c-c73d-4c72-b964-a25d7dece172"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.059687 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "924ac90c-c73d-4c72-b964-a25d7dece172" (UID: "924ac90c-c73d-4c72-b964-a25d7dece172"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.061254 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/924ac90c-c73d-4c72-b964-a25d7dece172-kube-api-access-p54c9" (OuterVolumeSpecName: "kube-api-access-p54c9") pod "924ac90c-c73d-4c72-b964-a25d7dece172" (UID: "924ac90c-c73d-4c72-b964-a25d7dece172"). InnerVolumeSpecName "kube-api-access-p54c9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.062224 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-scripts" (OuterVolumeSpecName: "scripts") pod "924ac90c-c73d-4c72-b964-a25d7dece172" (UID: "924ac90c-c73d-4c72-b964-a25d7dece172"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.079000 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "924ac90c-c73d-4c72-b964-a25d7dece172" (UID: "924ac90c-c73d-4c72-b964-a25d7dece172"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.084647 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-config-data" (OuterVolumeSpecName: "config-data") pod "924ac90c-c73d-4c72-b964-a25d7dece172" (UID: "924ac90c-c73d-4c72-b964-a25d7dece172"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.155568 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p54c9\" (UniqueName: \"kubernetes.io/projected/924ac90c-c73d-4c72-b964-a25d7dece172-kube-api-access-p54c9\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.155897 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.155912 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.155937 4940 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.155948 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.155960 4940 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/924ac90c-c73d-4c72-b964-a25d7dece172-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.178244 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6953b269-d306-4f7a-900e-1334d18333cf" path="/var/lib/kubelet/pods/6953b269-d306-4f7a-900e-1334d18333cf/volumes" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.632125 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-t6hpq" event={"ID":"924ac90c-c73d-4c72-b964-a25d7dece172","Type":"ContainerDied","Data":"b0c5bf1f7fdec61f20cb9f9eadf895ae7508fb9f64c9d8d84e3dd7de02f41efc"} Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 
08:44:29.632185 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0c5bf1f7fdec61f20cb9f9eadf895ae7508fb9f64c9d8d84e3dd7de02f41efc" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.632205 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-t6hpq" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.737940 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-547b68454-mq24v"] Nov 26 08:44:29 crc kubenswrapper[4940]: E1126 08:44:29.738691 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6953b269-d306-4f7a-900e-1334d18333cf" containerName="init" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.738737 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6953b269-d306-4f7a-900e-1334d18333cf" containerName="init" Nov 26 08:44:29 crc kubenswrapper[4940]: E1126 08:44:29.738794 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6953b269-d306-4f7a-900e-1334d18333cf" containerName="dnsmasq-dns" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.738814 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6953b269-d306-4f7a-900e-1334d18333cf" containerName="dnsmasq-dns" Nov 26 08:44:29 crc kubenswrapper[4940]: E1126 08:44:29.738861 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="924ac90c-c73d-4c72-b964-a25d7dece172" containerName="keystone-bootstrap" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.738882 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="924ac90c-c73d-4c72-b964-a25d7dece172" containerName="keystone-bootstrap" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.739409 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="924ac90c-c73d-4c72-b964-a25d7dece172" containerName="keystone-bootstrap" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.739468 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6953b269-d306-4f7a-900e-1334d18333cf" containerName="dnsmasq-dns" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.740779 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.744390 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.744761 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.745672 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hm8sh" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.746415 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.754365 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-547b68454-mq24v"] Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.870956 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-combined-ca-bundle\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.871010 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-config-data\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.871027 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-credential-keys\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.871348 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-scripts\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.871389 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-fernet-keys\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.871446 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dppn5\" (UniqueName: \"kubernetes.io/projected/41100356-33a2-4f08-be53-7df972e1063f-kube-api-access-dppn5\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.973641 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dppn5\" (UniqueName: \"kubernetes.io/projected/41100356-33a2-4f08-be53-7df972e1063f-kube-api-access-dppn5\") pod 
\"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.973767 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-combined-ca-bundle\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.973811 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-config-data\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.973836 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-credential-keys\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.973956 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-scripts\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.974476 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-fernet-keys\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.977679 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-credential-keys\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.977695 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-scripts\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.978472 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-combined-ca-bundle\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.979572 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-fernet-keys\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.984326 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41100356-33a2-4f08-be53-7df972e1063f-config-data\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:29 crc kubenswrapper[4940]: I1126 08:44:29.994451 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dppn5\" (UniqueName: \"kubernetes.io/projected/41100356-33a2-4f08-be53-7df972e1063f-kube-api-access-dppn5\") pod \"keystone-547b68454-mq24v\" (UID: \"41100356-33a2-4f08-be53-7df972e1063f\") " pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:30 crc kubenswrapper[4940]: I1126 08:44:30.074809 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:30 crc kubenswrapper[4940]: I1126 08:44:30.546606 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-547b68454-mq24v"] Nov 26 08:44:30 crc kubenswrapper[4940]: W1126 08:44:30.552443 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41100356_33a2_4f08_be53_7df972e1063f.slice/crio-1a99940f2ba42234d748f725df7572d138ee0bf15006db1f62534a3f7c6042c5 WatchSource:0}: Error finding container 1a99940f2ba42234d748f725df7572d138ee0bf15006db1f62534a3f7c6042c5: Status 404 returned error can't find the container with id 1a99940f2ba42234d748f725df7572d138ee0bf15006db1f62534a3f7c6042c5 Nov 26 08:44:30 crc kubenswrapper[4940]: I1126 08:44:30.643555 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-547b68454-mq24v" event={"ID":"41100356-33a2-4f08-be53-7df972e1063f","Type":"ContainerStarted","Data":"1a99940f2ba42234d748f725df7572d138ee0bf15006db1f62534a3f7c6042c5"} Nov 26 08:44:31 crc kubenswrapper[4940]: I1126 08:44:31.657190 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-547b68454-mq24v" event={"ID":"41100356-33a2-4f08-be53-7df972e1063f","Type":"ContainerStarted","Data":"4928ca362738a3a57e75ef97a6f159dee8f4e9f29d8f1a4af67673706f6c9d24"} Nov 26 08:44:31 crc kubenswrapper[4940]: I1126 08:44:31.657482 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-547b68454-mq24v" Nov 26 08:44:31 crc kubenswrapper[4940]: I1126 08:44:31.674813 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-547b68454-mq24v" podStartSLOduration=2.674790664 podStartE2EDuration="2.674790664s" podCreationTimestamp="2025-11-26 08:44:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:44:31.674583518 +0000 UTC m=+6573.194725167" watchObservedRunningTime="2025-11-26 08:44:31.674790664 +0000 UTC m=+6573.194932283" Nov 26 08:44:51 crc kubenswrapper[4940]: I1126 08:44:51.728169 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:44:51 crc kubenswrapper[4940]: I1126 08:44:51.728645 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.155302 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww"] Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.157632 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.160685 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.160905 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.168324 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww"] Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.232004 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d53cfca0-f04e-482f-a536-6fc940329283-config-volume\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.232107 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d53cfca0-f04e-482f-a536-6fc940329283-secret-volume\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.232278 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldwrz\" (UniqueName: \"kubernetes.io/projected/d53cfca0-f04e-482f-a536-6fc940329283-kube-api-access-ldwrz\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.333773 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d53cfca0-f04e-482f-a536-6fc940329283-config-volume\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.333841 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d53cfca0-f04e-482f-a536-6fc940329283-secret-volume\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.333929 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldwrz\" (UniqueName: 
\"kubernetes.io/projected/d53cfca0-f04e-482f-a536-6fc940329283-kube-api-access-ldwrz\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.335598 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d53cfca0-f04e-482f-a536-6fc940329283-config-volume\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.341333 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d53cfca0-f04e-482f-a536-6fc940329283-secret-volume\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.366358 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldwrz\" (UniqueName: \"kubernetes.io/projected/d53cfca0-f04e-482f-a536-6fc940329283-kube-api-access-ldwrz\") pod \"collect-profiles-29402445-nmdww\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.485377 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.929543 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww"] Nov 26 08:45:00 crc kubenswrapper[4940]: I1126 08:45:00.953801 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" event={"ID":"d53cfca0-f04e-482f-a536-6fc940329283","Type":"ContainerStarted","Data":"c28098cd5b209a72ccde6b35411f2c21ed9724181d2c5ae4d1324e4a64014b56"} Nov 26 08:45:01 crc kubenswrapper[4940]: I1126 08:45:01.629150 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-547b68454-mq24v" Nov 26 08:45:01 crc kubenswrapper[4940]: I1126 08:45:01.964313 4940 generic.go:334] "Generic (PLEG): container finished" podID="d53cfca0-f04e-482f-a536-6fc940329283" containerID="9e3961016e59236d040c4d49c63c58a5c259f9e70f1fcbf64ff0420e15542361" exitCode=0 Nov 26 08:45:01 crc kubenswrapper[4940]: I1126 08:45:01.964354 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" event={"ID":"d53cfca0-f04e-482f-a536-6fc940329283","Type":"ContainerDied","Data":"9e3961016e59236d040c4d49c63c58a5c259f9e70f1fcbf64ff0420e15542361"} Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.331191 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.382642 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d53cfca0-f04e-482f-a536-6fc940329283-secret-volume\") pod \"d53cfca0-f04e-482f-a536-6fc940329283\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.382814 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldwrz\" (UniqueName: \"kubernetes.io/projected/d53cfca0-f04e-482f-a536-6fc940329283-kube-api-access-ldwrz\") pod \"d53cfca0-f04e-482f-a536-6fc940329283\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.382859 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d53cfca0-f04e-482f-a536-6fc940329283-config-volume\") pod \"d53cfca0-f04e-482f-a536-6fc940329283\" (UID: \"d53cfca0-f04e-482f-a536-6fc940329283\") " Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.384249 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d53cfca0-f04e-482f-a536-6fc940329283-config-volume" (OuterVolumeSpecName: "config-volume") pod "d53cfca0-f04e-482f-a536-6fc940329283" (UID: "d53cfca0-f04e-482f-a536-6fc940329283"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.391431 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d53cfca0-f04e-482f-a536-6fc940329283-kube-api-access-ldwrz" (OuterVolumeSpecName: "kube-api-access-ldwrz") pod "d53cfca0-f04e-482f-a536-6fc940329283" (UID: "d53cfca0-f04e-482f-a536-6fc940329283"). InnerVolumeSpecName "kube-api-access-ldwrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.392755 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d53cfca0-f04e-482f-a536-6fc940329283-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d53cfca0-f04e-482f-a536-6fc940329283" (UID: "d53cfca0-f04e-482f-a536-6fc940329283"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.485007 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d53cfca0-f04e-482f-a536-6fc940329283-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.485078 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldwrz\" (UniqueName: \"kubernetes.io/projected/d53cfca0-f04e-482f-a536-6fc940329283-kube-api-access-ldwrz\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.485091 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d53cfca0-f04e-482f-a536-6fc940329283-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.983870 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" event={"ID":"d53cfca0-f04e-482f-a536-6fc940329283","Type":"ContainerDied","Data":"c28098cd5b209a72ccde6b35411f2c21ed9724181d2c5ae4d1324e4a64014b56"} Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.983906 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c28098cd5b209a72ccde6b35411f2c21ed9724181d2c5ae4d1324e4a64014b56" Nov 26 08:45:03 crc kubenswrapper[4940]: I1126 08:45:03.983971 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww" Nov 26 08:45:04 crc kubenswrapper[4940]: I1126 08:45:04.411233 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf"] Nov 26 08:45:04 crc kubenswrapper[4940]: I1126 08:45:04.419288 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-xx2qf"] Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.182428 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6885546-c253-4072-9973-596e65bcf799" path="/var/lib/kubelet/pods/c6885546-c253-4072-9973-596e65bcf799/volumes" Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.903020 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 08:45:05 crc kubenswrapper[4940]: E1126 08:45:05.903391 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d53cfca0-f04e-482f-a536-6fc940329283" containerName="collect-profiles" Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.903411 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d53cfca0-f04e-482f-a536-6fc940329283" containerName="collect-profiles" Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.903632 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d53cfca0-f04e-482f-a536-6fc940329283" containerName="collect-profiles" Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.904310 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.911447 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.911600 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-xc7hx" Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.911705 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 26 08:45:05 crc kubenswrapper[4940]: I1126 08:45:05.915559 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.033027 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config-secret\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.033172 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.033212 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vsnn\" (UniqueName: \"kubernetes.io/projected/4247e8e1-7d42-4867-a933-7d0689cc792f-kube-api-access-6vsnn\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.128090 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 26 08:45:06 crc kubenswrapper[4940]: E1126 08:45:06.128789 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-6vsnn openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="4247e8e1-7d42-4867-a933-7d0689cc792f" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.134862 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.134957 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vsnn\" (UniqueName: \"kubernetes.io/projected/4247e8e1-7d42-4867-a933-7d0689cc792f-kube-api-access-6vsnn\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.135129 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config-secret\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 
08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.136078 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: E1126 08:45:06.137183 4940 projected.go:194] Error preparing data for projected volume kube-api-access-6vsnn for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (4247e8e1-7d42-4867-a933-7d0689cc792f) does not match the UID in record. The object might have been deleted and then recreated Nov 26 08:45:06 crc kubenswrapper[4940]: E1126 08:45:06.137273 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4247e8e1-7d42-4867-a933-7d0689cc792f-kube-api-access-6vsnn podName:4247e8e1-7d42-4867-a933-7d0689cc792f nodeName:}" failed. No retries permitted until 2025-11-26 08:45:06.637247084 +0000 UTC m=+6608.157388723 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-6vsnn" (UniqueName: "kubernetes.io/projected/4247e8e1-7d42-4867-a933-7d0689cc792f-kube-api-access-6vsnn") pod "openstackclient" (UID: "4247e8e1-7d42-4867-a933-7d0689cc792f") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (4247e8e1-7d42-4867-a933-7d0689cc792f) does not match the UID in record. The object might have been deleted and then recreated Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.139455 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.141938 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config-secret\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.150863 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.152312 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.161663 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.236664 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config-secret\") pod \"openstackclient\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.236748 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config\") pod \"openstackclient\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.236891 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5fhm\" (UniqueName: \"kubernetes.io/projected/687afc0a-79a8-492c-a626-44e95c547d23-kube-api-access-m5fhm\") pod \"openstackclient\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.337778 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config-secret\") pod \"openstackclient\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.337832 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config\") pod \"openstackclient\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.337952 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5fhm\" (UniqueName: \"kubernetes.io/projected/687afc0a-79a8-492c-a626-44e95c547d23-kube-api-access-m5fhm\") pod \"openstackclient\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.339820 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config\") pod \"openstackclient\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.341899 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config-secret\") pod \"openstackclient\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.362482 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5fhm\" (UniqueName: \"kubernetes.io/projected/687afc0a-79a8-492c-a626-44e95c547d23-kube-api-access-m5fhm\") pod \"openstackclient\" (UID: 
\"687afc0a-79a8-492c-a626-44e95c547d23\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.514230 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.642399 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vsnn\" (UniqueName: \"kubernetes.io/projected/4247e8e1-7d42-4867-a933-7d0689cc792f-kube-api-access-6vsnn\") pod \"openstackclient\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " pod="openstack/openstackclient" Nov 26 08:45:06 crc kubenswrapper[4940]: E1126 08:45:06.648284 4940 projected.go:194] Error preparing data for projected volume kube-api-access-6vsnn for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (4247e8e1-7d42-4867-a933-7d0689cc792f) does not match the UID in record. The object might have been deleted and then recreated Nov 26 08:45:06 crc kubenswrapper[4940]: E1126 08:45:06.648596 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4247e8e1-7d42-4867-a933-7d0689cc792f-kube-api-access-6vsnn podName:4247e8e1-7d42-4867-a933-7d0689cc792f nodeName:}" failed. No retries permitted until 2025-11-26 08:45:07.648576689 +0000 UTC m=+6609.168718308 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-6vsnn" (UniqueName: "kubernetes.io/projected/4247e8e1-7d42-4867-a933-7d0689cc792f-kube-api-access-6vsnn") pod "openstackclient" (UID: "4247e8e1-7d42-4867-a933-7d0689cc792f") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (4247e8e1-7d42-4867-a933-7d0689cc792f) does not match the UID in record. The object might have been deleted and then recreated Nov 26 08:45:06 crc kubenswrapper[4940]: I1126 08:45:06.980293 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.008154 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.009289 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"687afc0a-79a8-492c-a626-44e95c547d23","Type":"ContainerStarted","Data":"0c9638f02a40393346332bd76aa8202860c22c113a046a7c8d7314b4561addc8"} Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.018567 4940 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="4247e8e1-7d42-4867-a933-7d0689cc792f" podUID="687afc0a-79a8-492c-a626-44e95c547d23" Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.022912 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.052088 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config-secret\") pod \"4247e8e1-7d42-4867-a933-7d0689cc792f\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.052347 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config\") pod \"4247e8e1-7d42-4867-a933-7d0689cc792f\" (UID: \"4247e8e1-7d42-4867-a933-7d0689cc792f\") " Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.053095 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vsnn\" (UniqueName: \"kubernetes.io/projected/4247e8e1-7d42-4867-a933-7d0689cc792f-kube-api-access-6vsnn\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.053142 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "4247e8e1-7d42-4867-a933-7d0689cc792f" (UID: "4247e8e1-7d42-4867-a933-7d0689cc792f"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.057781 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "4247e8e1-7d42-4867-a933-7d0689cc792f" (UID: "4247e8e1-7d42-4867-a933-7d0689cc792f"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.154851 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.154922 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4247e8e1-7d42-4867-a933-7d0689cc792f-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:07 crc kubenswrapper[4940]: I1126 08:45:07.180615 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4247e8e1-7d42-4867-a933-7d0689cc792f" path="/var/lib/kubelet/pods/4247e8e1-7d42-4867-a933-7d0689cc792f/volumes" Nov 26 08:45:08 crc kubenswrapper[4940]: I1126 08:45:08.018149 4940 util.go:30] "No sandbox for pod can be found. 
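The repeated "failed to fetch token ... the UID in the bound object reference ... does not match the UID in record" errors above are the kubelet requesting a service-account token bound to the old pod UID (4247e8e1-...) after the pod was deleted and recreated under a new UID (687afc0a-...); the API server rejects any TokenRequest whose boundObjectRef points at a UID that no longer exists, and the kubelet retries with a doubling delay (500ms in the first entry, 1s in the next) until the stale pod's volumes are torn down. A minimal client-go sketch of the same call, assuming in-cluster credentials and reusing the names and UID from the log purely for illustration:

package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	expiry := int64(3600)
	// BoundObjectRef pins the token to one specific pod UID. If the pod is
	// deleted and recreated, this stale UID makes the request "forbidden",
	// exactly as in the log entries above.
	tr := &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			Audiences:         []string{"https://kubernetes.default.svc"},
			ExpirationSeconds: &expiry,
			BoundObjectRef: &authenticationv1.BoundObjectReference{
				Kind:       "Pod",
				APIVersion: "v1",
				Name:       "openstackclient",
				UID:        types.UID("4247e8e1-7d42-4867-a933-7d0689cc792f"),
			},
		},
	}
	out, err := client.CoreV1().ServiceAccounts("openstack").
		CreateToken(context.TODO(), "openstackclient-openstackclient", tr, metav1.CreateOptions{})
	if err != nil {
		fmt.Println("token request rejected:", err) // the stale-UID case lands here
		return
	}
	fmt.Println("token expires:", out.Status.ExpirationTimestamp)
}

Once the reconciler unmounts the old pod's volumes (the UnmountVolume / "Volume detached" entries above) and mounts the new pod's kube-api-access-m5fhm volume under the fresh UID, the errors stop.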
Need to start a new one" pod="openstack/openstackclient" Nov 26 08:45:08 crc kubenswrapper[4940]: I1126 08:45:08.025738 4940 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="4247e8e1-7d42-4867-a933-7d0689cc792f" podUID="687afc0a-79a8-492c-a626-44e95c547d23" Nov 26 08:45:11 crc kubenswrapper[4940]: I1126 08:45:11.559202 4940 scope.go:117] "RemoveContainer" containerID="690ed756d5414940cba6c1b633b54b6fd1ca96e5eaade57510e396d6c720ddf5" Nov 26 08:45:18 crc kubenswrapper[4940]: I1126 08:45:18.112966 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"687afc0a-79a8-492c-a626-44e95c547d23","Type":"ContainerStarted","Data":"742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1"} Nov 26 08:45:18 crc kubenswrapper[4940]: I1126 08:45:18.156442 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.081034671 podStartE2EDuration="12.156425716s" podCreationTimestamp="2025-11-26 08:45:06 +0000 UTC" firstStartedPulling="2025-11-26 08:45:06.982838021 +0000 UTC m=+6608.502979640" lastFinishedPulling="2025-11-26 08:45:17.058229026 +0000 UTC m=+6618.578370685" observedRunningTime="2025-11-26 08:45:18.143188263 +0000 UTC m=+6619.663329942" watchObservedRunningTime="2025-11-26 08:45:18.156425716 +0000 UTC m=+6619.676567335" Nov 26 08:45:21 crc kubenswrapper[4940]: I1126 08:45:21.728407 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:45:21 crc kubenswrapper[4940]: I1126 08:45:21.729347 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:45:51 crc kubenswrapper[4940]: I1126 08:45:51.728807 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:45:51 crc kubenswrapper[4940]: I1126 08:45:51.729452 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:45:51 crc kubenswrapper[4940]: I1126 08:45:51.729510 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 08:45:51 crc kubenswrapper[4940]: I1126 08:45:51.730640 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be 
restarted" Nov 26 08:45:51 crc kubenswrapper[4940]: I1126 08:45:51.730894 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" gracePeriod=600 Nov 26 08:45:51 crc kubenswrapper[4940]: E1126 08:45:51.877962 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:45:52 crc kubenswrapper[4940]: I1126 08:45:52.443883 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" exitCode=0 Nov 26 08:45:52 crc kubenswrapper[4940]: I1126 08:45:52.443925 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"} Nov 26 08:45:52 crc kubenswrapper[4940]: I1126 08:45:52.443956 4940 scope.go:117] "RemoveContainer" containerID="ccc069e03298539b4573c9b4c189488e5f91f473e2a3e105b8467faf049684f1" Nov 26 08:45:52 crc kubenswrapper[4940]: I1126 08:45:52.444523 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:45:52 crc kubenswrapper[4940]: E1126 08:45:52.444790 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:46:06 crc kubenswrapper[4940]: I1126 08:46:06.165618 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:46:06 crc kubenswrapper[4940]: E1126 08:46:06.166526 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.476755 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mrl64"] Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.479467 4940 util.go:30] "No sandbox for pod can be found. 
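The machine-config-daemon entries above show the standard liveness flow: the prober issues GET http://127.0.0.1:8798/health, a refused connection counts as a probe failure, and after enough consecutive failures the container is killed (here with gracePeriod=600) and restarted subject to CrashLoopBackOff. For an HTTP probe, any status from 200 up to but not including 400 passes. A hypothetical handler, not the daemon's actual code, sketches the minimum a target has to serve:

package main

import (
	"log"
	"net/http"
)

func main() {
	// An HTTP liveness probe only checks reachability and status code:
	// 200-399 passes; a refused connection or other status fails the probe.
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	})
	// Port 8798 matches the probe target in the log entries above.
	log.Fatal(http.ListenAndServe("127.0.0.1:8798", nil))
}

In the log, the process is not listening at all, so the dial itself fails ("connect: connection refused") and the kubelet reports the probe as failed before any HTTP exchange happens.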
Need to start a new one" pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.486959 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mrl64"] Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.618791 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-utilities\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.618840 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-catalog-content\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.618868 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qm4j\" (UniqueName: \"kubernetes.io/projected/356f5361-7288-4fa4-923d-27125d0caf37-kube-api-access-9qm4j\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.721073 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-utilities\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.721385 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-catalog-content\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.721405 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qm4j\" (UniqueName: \"kubernetes.io/projected/356f5361-7288-4fa4-923d-27125d0caf37-kube-api-access-9qm4j\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.721722 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-utilities\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.722113 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-catalog-content\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.742740 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9qm4j\" (UniqueName: \"kubernetes.io/projected/356f5361-7288-4fa4-923d-27125d0caf37-kube-api-access-9qm4j\") pod \"community-operators-mrl64\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:17 crc kubenswrapper[4940]: I1126 08:46:17.813624 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:18 crc kubenswrapper[4940]: I1126 08:46:18.351910 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mrl64"] Nov 26 08:46:18 crc kubenswrapper[4940]: I1126 08:46:18.713307 4940 generic.go:334] "Generic (PLEG): container finished" podID="356f5361-7288-4fa4-923d-27125d0caf37" containerID="5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352" exitCode=0 Nov 26 08:46:18 crc kubenswrapper[4940]: I1126 08:46:18.713355 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mrl64" event={"ID":"356f5361-7288-4fa4-923d-27125d0caf37","Type":"ContainerDied","Data":"5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352"} Nov 26 08:46:18 crc kubenswrapper[4940]: I1126 08:46:18.713383 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mrl64" event={"ID":"356f5361-7288-4fa4-923d-27125d0caf37","Type":"ContainerStarted","Data":"a2c4cc5e333c99832523cce6c1ef146ec1a3d4dfa52bfcda75190d228a024967"} Nov 26 08:46:19 crc kubenswrapper[4940]: I1126 08:46:19.170274 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:46:19 crc kubenswrapper[4940]: E1126 08:46:19.170832 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:46:20 crc kubenswrapper[4940]: I1126 08:46:20.744552 4940 generic.go:334] "Generic (PLEG): container finished" podID="356f5361-7288-4fa4-923d-27125d0caf37" containerID="fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a" exitCode=0 Nov 26 08:46:20 crc kubenswrapper[4940]: I1126 08:46:20.744637 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mrl64" event={"ID":"356f5361-7288-4fa4-923d-27125d0caf37","Type":"ContainerDied","Data":"fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a"} Nov 26 08:46:21 crc kubenswrapper[4940]: I1126 08:46:21.771488 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mrl64" event={"ID":"356f5361-7288-4fa4-923d-27125d0caf37","Type":"ContainerStarted","Data":"61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa"} Nov 26 08:46:21 crc kubenswrapper[4940]: I1126 08:46:21.793637 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mrl64" podStartSLOduration=2.273541777 podStartE2EDuration="4.79362192s" podCreationTimestamp="2025-11-26 08:46:17 +0000 UTC" firstStartedPulling="2025-11-26 08:46:18.714775683 +0000 UTC m=+6680.234917292" 
lastFinishedPulling="2025-11-26 08:46:21.234855816 +0000 UTC m=+6682.754997435" observedRunningTime="2025-11-26 08:46:21.792147464 +0000 UTC m=+6683.312289093" watchObservedRunningTime="2025-11-26 08:46:21.79362192 +0000 UTC m=+6683.313763539" Nov 26 08:46:27 crc kubenswrapper[4940]: I1126 08:46:27.814459 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:27 crc kubenswrapper[4940]: I1126 08:46:27.816181 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:27 crc kubenswrapper[4940]: I1126 08:46:27.879622 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:28 crc kubenswrapper[4940]: I1126 08:46:28.881445 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:28 crc kubenswrapper[4940]: I1126 08:46:28.932090 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mrl64"] Nov 26 08:46:30 crc kubenswrapper[4940]: I1126 08:46:30.847134 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mrl64" podUID="356f5361-7288-4fa4-923d-27125d0caf37" containerName="registry-server" containerID="cri-o://61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa" gracePeriod=2 Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.165214 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:46:31 crc kubenswrapper[4940]: E1126 08:46:31.165896 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.283146 4940 util.go:48] "No ready sandbox for pod can be found. 
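The podStartSLOduration figures in these entries are self-consistent once image-pull time is subtracted from the end-to-end duration using the monotonic (m=+...) timestamps. For community-operators-mrl64: pull time is 6682.754997435 - 6680.234917292 = 2.520080143s, and 4.79362192s - 2.520080143s = 2.273541777s, exactly the reported SLO duration; the earlier openstackclient entry checks out the same way (12.156425716s - 10.075391045s = 2.081034671s). A small sketch of that arithmetic, with the constants copied from the log as integer nanoseconds to avoid float rounding:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Monotonic clock offsets (the m=+... values) from the log, in nanoseconds.
	firstStartedPulling := time.Duration(6680234917292)
	lastFinishedPulling := time.Duration(6682754997435)
	e2e := time.Duration(4793621920) // podStartE2EDuration = 4.79362192s

	pull := lastFinishedPulling - firstStartedPulling
	fmt.Println("image pull:", pull)       // 2.520080143s
	fmt.Println("SLO duration:", e2e-pull) // 2.273541777s, as reported
}

So the tracker's SLO figure is the pull-adjusted startup time: the portion of pod startup the node is accountable for after discounting registry latency.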
Need to start a new one" pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.371308 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-catalog-content\") pod \"356f5361-7288-4fa4-923d-27125d0caf37\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.371391 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qm4j\" (UniqueName: \"kubernetes.io/projected/356f5361-7288-4fa4-923d-27125d0caf37-kube-api-access-9qm4j\") pod \"356f5361-7288-4fa4-923d-27125d0caf37\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.371412 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-utilities\") pod \"356f5361-7288-4fa4-923d-27125d0caf37\" (UID: \"356f5361-7288-4fa4-923d-27125d0caf37\") " Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.373096 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-utilities" (OuterVolumeSpecName: "utilities") pod "356f5361-7288-4fa4-923d-27125d0caf37" (UID: "356f5361-7288-4fa4-923d-27125d0caf37"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.374787 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.376935 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/356f5361-7288-4fa4-923d-27125d0caf37-kube-api-access-9qm4j" (OuterVolumeSpecName: "kube-api-access-9qm4j") pod "356f5361-7288-4fa4-923d-27125d0caf37" (UID: "356f5361-7288-4fa4-923d-27125d0caf37"). InnerVolumeSpecName "kube-api-access-9qm4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.438531 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "356f5361-7288-4fa4-923d-27125d0caf37" (UID: "356f5361-7288-4fa4-923d-27125d0caf37"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.476758 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qm4j\" (UniqueName: \"kubernetes.io/projected/356f5361-7288-4fa4-923d-27125d0caf37-kube-api-access-9qm4j\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.477075 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/356f5361-7288-4fa4-923d-27125d0caf37-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.859220 4940 generic.go:334] "Generic (PLEG): container finished" podID="356f5361-7288-4fa4-923d-27125d0caf37" containerID="61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa" exitCode=0 Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.859278 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mrl64" event={"ID":"356f5361-7288-4fa4-923d-27125d0caf37","Type":"ContainerDied","Data":"61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa"} Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.859308 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mrl64" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.859330 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mrl64" event={"ID":"356f5361-7288-4fa4-923d-27125d0caf37","Type":"ContainerDied","Data":"a2c4cc5e333c99832523cce6c1ef146ec1a3d4dfa52bfcda75190d228a024967"} Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.859362 4940 scope.go:117] "RemoveContainer" containerID="61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.893502 4940 scope.go:117] "RemoveContainer" containerID="fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.906141 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mrl64"] Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.910743 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mrl64"] Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.930690 4940 scope.go:117] "RemoveContainer" containerID="5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.961784 4940 scope.go:117] "RemoveContainer" containerID="61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa" Nov 26 08:46:31 crc kubenswrapper[4940]: E1126 08:46:31.962316 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa\": container with ID starting with 61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa not found: ID does not exist" containerID="61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.962370 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa"} err="failed to get container status 
\"61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa\": rpc error: code = NotFound desc = could not find container \"61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa\": container with ID starting with 61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa not found: ID does not exist" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.962400 4940 scope.go:117] "RemoveContainer" containerID="fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a" Nov 26 08:46:31 crc kubenswrapper[4940]: E1126 08:46:31.962798 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a\": container with ID starting with fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a not found: ID does not exist" containerID="fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.962833 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a"} err="failed to get container status \"fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a\": rpc error: code = NotFound desc = could not find container \"fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a\": container with ID starting with fb8a4852320357fc19d565d34fba458d2501659ab0b31c198740183b9c387c8a not found: ID does not exist" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.962861 4940 scope.go:117] "RemoveContainer" containerID="5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352" Nov 26 08:46:31 crc kubenswrapper[4940]: E1126 08:46:31.963136 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352\": container with ID starting with 5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352 not found: ID does not exist" containerID="5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352" Nov 26 08:46:31 crc kubenswrapper[4940]: I1126 08:46:31.963165 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352"} err="failed to get container status \"5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352\": rpc error: code = NotFound desc = could not find container \"5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352\": container with ID starting with 5b0efde07b19294b10d93205dd1cb3b34740fa302b42e225f1dfbadd4aa93352 not found: ID does not exist" Nov 26 08:46:32 crc kubenswrapper[4940]: E1126 08:46:32.492769 4940 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.58:44900->38.102.83.58:46351: write tcp 38.102.83.58:44900->38.102.83.58:46351: write: connection reset by peer Nov 26 08:46:33 crc kubenswrapper[4940]: I1126 08:46:33.181985 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="356f5361-7288-4fa4-923d-27125d0caf37" path="/var/lib/kubelet/pods/356f5361-7288-4fa4-923d-27125d0caf37/volumes" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.386852 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-b0c5-account-create-update-z7rdr"] Nov 26 08:46:40 crc kubenswrapper[4940]: E1126 08:46:40.387807 
4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="356f5361-7288-4fa4-923d-27125d0caf37" containerName="extract-content" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.387824 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="356f5361-7288-4fa4-923d-27125d0caf37" containerName="extract-content" Nov 26 08:46:40 crc kubenswrapper[4940]: E1126 08:46:40.387854 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="356f5361-7288-4fa4-923d-27125d0caf37" containerName="registry-server" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.387862 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="356f5361-7288-4fa4-923d-27125d0caf37" containerName="registry-server" Nov 26 08:46:40 crc kubenswrapper[4940]: E1126 08:46:40.387873 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="356f5361-7288-4fa4-923d-27125d0caf37" containerName="extract-utilities" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.387882 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="356f5361-7288-4fa4-923d-27125d0caf37" containerName="extract-utilities" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.388182 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="356f5361-7288-4fa4-923d-27125d0caf37" containerName="registry-server" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.388921 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.391169 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.396111 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-8rk6t"] Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.397922 4940 util.go:30] "No sandbox for pod can be found. 
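Two cleanup patterns appear in the entries just above. The RemoveStaleState lines show the CPU and memory managers dropping per-container assignments for the deleted pod, and the earlier "ContainerStatus from runtime service failed ... NotFound" errors are benign: the kubelet asks the runtime for the status of containers it just removed, and CRI-O answers with gRPC NotFound, which callers treat as "already gone". A sketch of that idempotent check against the CRI API, assuming CRI-O's conventional socket path and reusing a container ID from the log:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/status"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	// Conventional CRI-O socket path; adjust for other runtimes.
	conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	rt := runtimeapi.NewRuntimeServiceClient(conn)

	id := "61cf4aa1bbfed1c0fec937fbb54c26bdf0b8e54dfc2b28d6eb7a8c1cb7ba8daa"
	_, err = rt.ContainerStatus(context.TODO(),
		&runtimeapi.ContainerStatusRequest{ContainerId: id})
	if status.Code(err) == codes.NotFound {
		// Same condition as the log: the container was already deleted,
		// so there is nothing left to clean up.
		fmt.Println("container already gone:", id)
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("container still present")
}

Treating NotFound as success is what keeps repeated RemoveContainer passes from failing a sync loop that already achieved its goal.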
Need to start a new one" pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.404322 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-8rk6t"] Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.412158 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b0c5-account-create-update-z7rdr"] Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.537305 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4q9t\" (UniqueName: \"kubernetes.io/projected/29328f7e-482f-4011-80b6-146c887cdc3c-kube-api-access-l4q9t\") pod \"barbican-db-create-8rk6t\" (UID: \"29328f7e-482f-4011-80b6-146c887cdc3c\") " pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.537381 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d065c95a-4ae1-499c-ba83-64a4b317c524-operator-scripts\") pod \"barbican-b0c5-account-create-update-z7rdr\" (UID: \"d065c95a-4ae1-499c-ba83-64a4b317c524\") " pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.537503 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvkd6\" (UniqueName: \"kubernetes.io/projected/d065c95a-4ae1-499c-ba83-64a4b317c524-kube-api-access-zvkd6\") pod \"barbican-b0c5-account-create-update-z7rdr\" (UID: \"d065c95a-4ae1-499c-ba83-64a4b317c524\") " pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.537547 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29328f7e-482f-4011-80b6-146c887cdc3c-operator-scripts\") pod \"barbican-db-create-8rk6t\" (UID: \"29328f7e-482f-4011-80b6-146c887cdc3c\") " pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.638928 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4q9t\" (UniqueName: \"kubernetes.io/projected/29328f7e-482f-4011-80b6-146c887cdc3c-kube-api-access-l4q9t\") pod \"barbican-db-create-8rk6t\" (UID: \"29328f7e-482f-4011-80b6-146c887cdc3c\") " pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.639076 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d065c95a-4ae1-499c-ba83-64a4b317c524-operator-scripts\") pod \"barbican-b0c5-account-create-update-z7rdr\" (UID: \"d065c95a-4ae1-499c-ba83-64a4b317c524\") " pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.639220 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvkd6\" (UniqueName: \"kubernetes.io/projected/d065c95a-4ae1-499c-ba83-64a4b317c524-kube-api-access-zvkd6\") pod \"barbican-b0c5-account-create-update-z7rdr\" (UID: \"d065c95a-4ae1-499c-ba83-64a4b317c524\") " pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.639243 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/29328f7e-482f-4011-80b6-146c887cdc3c-operator-scripts\") pod \"barbican-db-create-8rk6t\" (UID: \"29328f7e-482f-4011-80b6-146c887cdc3c\") " pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.639935 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d065c95a-4ae1-499c-ba83-64a4b317c524-operator-scripts\") pod \"barbican-b0c5-account-create-update-z7rdr\" (UID: \"d065c95a-4ae1-499c-ba83-64a4b317c524\") " pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.640194 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29328f7e-482f-4011-80b6-146c887cdc3c-operator-scripts\") pod \"barbican-db-create-8rk6t\" (UID: \"29328f7e-482f-4011-80b6-146c887cdc3c\") " pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.657529 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvkd6\" (UniqueName: \"kubernetes.io/projected/d065c95a-4ae1-499c-ba83-64a4b317c524-kube-api-access-zvkd6\") pod \"barbican-b0c5-account-create-update-z7rdr\" (UID: \"d065c95a-4ae1-499c-ba83-64a4b317c524\") " pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.657529 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4q9t\" (UniqueName: \"kubernetes.io/projected/29328f7e-482f-4011-80b6-146c887cdc3c-kube-api-access-l4q9t\") pod \"barbican-db-create-8rk6t\" (UID: \"29328f7e-482f-4011-80b6-146c887cdc3c\") " pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.715403 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:40 crc kubenswrapper[4940]: I1126 08:46:40.736581 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:41 crc kubenswrapper[4940]: I1126 08:46:41.140698 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b0c5-account-create-update-z7rdr"] Nov 26 08:46:41 crc kubenswrapper[4940]: I1126 08:46:41.199861 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-8rk6t"] Nov 26 08:46:41 crc kubenswrapper[4940]: W1126 08:46:41.203000 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29328f7e_482f_4011_80b6_146c887cdc3c.slice/crio-39ad57cee3d4b3974ebfcec7c5aa0ee5fecfb74d15d611b7faf57944bb7b2e3f WatchSource:0}: Error finding container 39ad57cee3d4b3974ebfcec7c5aa0ee5fecfb74d15d611b7faf57944bb7b2e3f: Status 404 returned error can't find the container with id 39ad57cee3d4b3974ebfcec7c5aa0ee5fecfb74d15d611b7faf57944bb7b2e3f Nov 26 08:46:41 crc kubenswrapper[4940]: I1126 08:46:41.954533 4940 generic.go:334] "Generic (PLEG): container finished" podID="d065c95a-4ae1-499c-ba83-64a4b317c524" containerID="bf0fb024cdc94dd3d4c72f7b2e81cfb94d3d5c94f0ba82ff8dc58168c5d31bac" exitCode=0 Nov 26 08:46:41 crc kubenswrapper[4940]: I1126 08:46:41.954947 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b0c5-account-create-update-z7rdr" event={"ID":"d065c95a-4ae1-499c-ba83-64a4b317c524","Type":"ContainerDied","Data":"bf0fb024cdc94dd3d4c72f7b2e81cfb94d3d5c94f0ba82ff8dc58168c5d31bac"} Nov 26 08:46:41 crc kubenswrapper[4940]: I1126 08:46:41.956092 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b0c5-account-create-update-z7rdr" event={"ID":"d065c95a-4ae1-499c-ba83-64a4b317c524","Type":"ContainerStarted","Data":"b128a539e7d063f85260921938bf3ce18a33375363ff4a4001fb35a483032b82"} Nov 26 08:46:41 crc kubenswrapper[4940]: I1126 08:46:41.957971 4940 generic.go:334] "Generic (PLEG): container finished" podID="29328f7e-482f-4011-80b6-146c887cdc3c" containerID="45545a54eb13926dd3f34a502f30d4936a3be621f40e7caa586835bf657ee125" exitCode=0 Nov 26 08:46:41 crc kubenswrapper[4940]: I1126 08:46:41.958074 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-8rk6t" event={"ID":"29328f7e-482f-4011-80b6-146c887cdc3c","Type":"ContainerDied","Data":"45545a54eb13926dd3f34a502f30d4936a3be621f40e7caa586835bf657ee125"} Nov 26 08:46:41 crc kubenswrapper[4940]: I1126 08:46:41.958124 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-8rk6t" event={"ID":"29328f7e-482f-4011-80b6-146c887cdc3c","Type":"ContainerStarted","Data":"39ad57cee3d4b3974ebfcec7c5aa0ee5fecfb74d15d611b7faf57944bb7b2e3f"} Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.294543 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.303807 4940 util.go:48] "No ready sandbox for pod can be found. 
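The VerifyControllerAttachedVolume, MountVolume, and UnmountVolume progressions for these barbican pods are the kubelet volume manager's reconciler at work: it repeatedly diffs a desired state of world (volumes the admitted pods need) against an actual state of world (volumes currently mounted) and issues mount or unmount operations for the difference. A toy version of that diff, with made-up types standing in for the kubelet's real caches:

package main

import "fmt"

// Toy stand-ins for the kubelet's desired/actual state-of-world caches.
type volumeName string

func reconcile(desired, actual map[volumeName]bool) (mount, unmount []volumeName) {
	for v := range desired {
		if !actual[v] { // desired but not mounted -> MountVolume
			mount = append(mount, v)
		}
	}
	for v := range actual {
		if !desired[v] { // mounted but no longer desired -> UnmountVolume
			unmount = append(unmount, v)
		}
	}
	return mount, unmount
}

func main() {
	desired := map[volumeName]bool{"operator-scripts": true, "kube-api-access-l4q9t": true}
	actual := map[volumeName]bool{"kube-api-access-6vsnn": true} // a stale pod's volume
	m, u := reconcile(desired, actual)
	fmt.Println("mount:", m, "unmount:", u)
}

Each operation runs through nestedpendingoperations (visible in the retry entries earlier), so a failing mount backs off independently without blocking unrelated volumes.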
Need to start a new one" pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.390475 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d065c95a-4ae1-499c-ba83-64a4b317c524-operator-scripts\") pod \"d065c95a-4ae1-499c-ba83-64a4b317c524\" (UID: \"d065c95a-4ae1-499c-ba83-64a4b317c524\") " Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.390547 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29328f7e-482f-4011-80b6-146c887cdc3c-operator-scripts\") pod \"29328f7e-482f-4011-80b6-146c887cdc3c\" (UID: \"29328f7e-482f-4011-80b6-146c887cdc3c\") " Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.390619 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4q9t\" (UniqueName: \"kubernetes.io/projected/29328f7e-482f-4011-80b6-146c887cdc3c-kube-api-access-l4q9t\") pod \"29328f7e-482f-4011-80b6-146c887cdc3c\" (UID: \"29328f7e-482f-4011-80b6-146c887cdc3c\") " Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.390655 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvkd6\" (UniqueName: \"kubernetes.io/projected/d065c95a-4ae1-499c-ba83-64a4b317c524-kube-api-access-zvkd6\") pod \"d065c95a-4ae1-499c-ba83-64a4b317c524\" (UID: \"d065c95a-4ae1-499c-ba83-64a4b317c524\") " Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.391096 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29328f7e-482f-4011-80b6-146c887cdc3c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "29328f7e-482f-4011-80b6-146c887cdc3c" (UID: "29328f7e-482f-4011-80b6-146c887cdc3c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.391225 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d065c95a-4ae1-499c-ba83-64a4b317c524-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d065c95a-4ae1-499c-ba83-64a4b317c524" (UID: "d065c95a-4ae1-499c-ba83-64a4b317c524"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.397357 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d065c95a-4ae1-499c-ba83-64a4b317c524-kube-api-access-zvkd6" (OuterVolumeSpecName: "kube-api-access-zvkd6") pod "d065c95a-4ae1-499c-ba83-64a4b317c524" (UID: "d065c95a-4ae1-499c-ba83-64a4b317c524"). InnerVolumeSpecName "kube-api-access-zvkd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.398315 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29328f7e-482f-4011-80b6-146c887cdc3c-kube-api-access-l4q9t" (OuterVolumeSpecName: "kube-api-access-l4q9t") pod "29328f7e-482f-4011-80b6-146c887cdc3c" (UID: "29328f7e-482f-4011-80b6-146c887cdc3c"). InnerVolumeSpecName "kube-api-access-l4q9t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.493177 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d065c95a-4ae1-499c-ba83-64a4b317c524-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.493221 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29328f7e-482f-4011-80b6-146c887cdc3c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.493237 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4q9t\" (UniqueName: \"kubernetes.io/projected/29328f7e-482f-4011-80b6-146c887cdc3c-kube-api-access-l4q9t\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.493316 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvkd6\" (UniqueName: \"kubernetes.io/projected/d065c95a-4ae1-499c-ba83-64a4b317c524-kube-api-access-zvkd6\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.974779 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-8rk6t" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.974768 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-8rk6t" event={"ID":"29328f7e-482f-4011-80b6-146c887cdc3c","Type":"ContainerDied","Data":"39ad57cee3d4b3974ebfcec7c5aa0ee5fecfb74d15d611b7faf57944bb7b2e3f"} Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.974907 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39ad57cee3d4b3974ebfcec7c5aa0ee5fecfb74d15d611b7faf57944bb7b2e3f" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.977213 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b0c5-account-create-update-z7rdr" event={"ID":"d065c95a-4ae1-499c-ba83-64a4b317c524","Type":"ContainerDied","Data":"b128a539e7d063f85260921938bf3ce18a33375363ff4a4001fb35a483032b82"} Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.977239 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b128a539e7d063f85260921938bf3ce18a33375363ff4a4001fb35a483032b82" Nov 26 08:46:43 crc kubenswrapper[4940]: I1126 08:46:43.977288 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b0c5-account-create-update-z7rdr" Nov 26 08:46:44 crc kubenswrapper[4940]: I1126 08:46:44.166695 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:46:44 crc kubenswrapper[4940]: E1126 08:46:44.166949 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.600429 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-54ngd"] Nov 26 08:46:45 crc kubenswrapper[4940]: E1126 08:46:45.601310 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29328f7e-482f-4011-80b6-146c887cdc3c" containerName="mariadb-database-create" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.601329 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="29328f7e-482f-4011-80b6-146c887cdc3c" containerName="mariadb-database-create" Nov 26 08:46:45 crc kubenswrapper[4940]: E1126 08:46:45.601348 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d065c95a-4ae1-499c-ba83-64a4b317c524" containerName="mariadb-account-create-update" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.601354 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d065c95a-4ae1-499c-ba83-64a4b317c524" containerName="mariadb-account-create-update" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.601535 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d065c95a-4ae1-499c-ba83-64a4b317c524" containerName="mariadb-account-create-update" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.601565 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="29328f7e-482f-4011-80b6-146c887cdc3c" containerName="mariadb-database-create" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.602226 4940 util.go:30] "No sandbox for pod can be found. 
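The recurring "back-off 5m0s restarting failed container" message for machine-config-daemon reflects the kubelet's per-container restart backoff: at upstream defaults it starts at 10s, doubles on each crash, and is capped at 5m, and the pod reports CrashLoopBackOff until the timer expires or a sufficiently long healthy run resets it. A sketch of that doubling-with-cap schedule, using the upstream default constants as an assumption:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Kubelet defaults: initial 10s, doubling per restart, capped at 5m.
	initial, maxDelay := 10*time.Second, 5*time.Minute
	d := initial
	for i := 1; i <= 8; i++ {
		fmt.Printf("restart %d: wait %v\n", i, d)
		d *= 2
		if d > maxDelay {
			d = maxDelay // from here on the log shows "back-off 5m0s"
		}
	}
}

That is why the "Error syncing pod, skipping ... CrashLoopBackOff" entries recur at intervals here: each sync attempt inside the 5m window is rejected immediately rather than starting the container.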
Need to start a new one" pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.605295 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-znqnv" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.605432 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.610572 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-54ngd"] Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.643132 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-combined-ca-bundle\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.643205 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-db-sync-config-data\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.643228 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mccw\" (UniqueName: \"kubernetes.io/projected/0460ba04-08aa-4afe-9125-f1eb105161a3-kube-api-access-2mccw\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.745710 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-combined-ca-bundle\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.745767 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-db-sync-config-data\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.745791 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mccw\" (UniqueName: \"kubernetes.io/projected/0460ba04-08aa-4afe-9125-f1eb105161a3-kube-api-access-2mccw\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.752672 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-db-sync-config-data\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.762780 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-combined-ca-bundle\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.803006 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mccw\" (UniqueName: \"kubernetes.io/projected/0460ba04-08aa-4afe-9125-f1eb105161a3-kube-api-access-2mccw\") pod \"barbican-db-sync-54ngd\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:45 crc kubenswrapper[4940]: I1126 08:46:45.979820 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:46 crc kubenswrapper[4940]: I1126 08:46:46.415556 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-54ngd"] Nov 26 08:46:47 crc kubenswrapper[4940]: I1126 08:46:47.001395 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-54ngd" event={"ID":"0460ba04-08aa-4afe-9125-f1eb105161a3","Type":"ContainerStarted","Data":"7dc206c8f89e124e804e8452802d7a5cd2fe2f725980b087c3edf7f424f48071"} Nov 26 08:46:52 crc kubenswrapper[4940]: I1126 08:46:52.044192 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-54ngd" event={"ID":"0460ba04-08aa-4afe-9125-f1eb105161a3","Type":"ContainerStarted","Data":"3816559f4bd9844a4e2d1aee689aa7ccd9927eed247747ab2df268679da453b3"} Nov 26 08:46:52 crc kubenswrapper[4940]: I1126 08:46:52.066495 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-54ngd" podStartSLOduration=2.615451487 podStartE2EDuration="7.06646984s" podCreationTimestamp="2025-11-26 08:46:45 +0000 UTC" firstStartedPulling="2025-11-26 08:46:46.424638516 +0000 UTC m=+6707.944780135" lastFinishedPulling="2025-11-26 08:46:50.875656869 +0000 UTC m=+6712.395798488" observedRunningTime="2025-11-26 08:46:52.061572825 +0000 UTC m=+6713.581714514" watchObservedRunningTime="2025-11-26 08:46:52.06646984 +0000 UTC m=+6713.586611479" Nov 26 08:46:53 crc kubenswrapper[4940]: I1126 08:46:53.055419 4940 generic.go:334] "Generic (PLEG): container finished" podID="0460ba04-08aa-4afe-9125-f1eb105161a3" containerID="3816559f4bd9844a4e2d1aee689aa7ccd9927eed247747ab2df268679da453b3" exitCode=0 Nov 26 08:46:53 crc kubenswrapper[4940]: I1126 08:46:53.055476 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-54ngd" event={"ID":"0460ba04-08aa-4afe-9125-f1eb105161a3","Type":"ContainerDied","Data":"3816559f4bd9844a4e2d1aee689aa7ccd9927eed247747ab2df268679da453b3"} Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.354089 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.500716 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-combined-ca-bundle\") pod \"0460ba04-08aa-4afe-9125-f1eb105161a3\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.500833 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mccw\" (UniqueName: \"kubernetes.io/projected/0460ba04-08aa-4afe-9125-f1eb105161a3-kube-api-access-2mccw\") pod \"0460ba04-08aa-4afe-9125-f1eb105161a3\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.500900 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-db-sync-config-data\") pod \"0460ba04-08aa-4afe-9125-f1eb105161a3\" (UID: \"0460ba04-08aa-4afe-9125-f1eb105161a3\") " Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.507052 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0460ba04-08aa-4afe-9125-f1eb105161a3" (UID: "0460ba04-08aa-4afe-9125-f1eb105161a3"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.508115 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0460ba04-08aa-4afe-9125-f1eb105161a3-kube-api-access-2mccw" (OuterVolumeSpecName: "kube-api-access-2mccw") pod "0460ba04-08aa-4afe-9125-f1eb105161a3" (UID: "0460ba04-08aa-4afe-9125-f1eb105161a3"). InnerVolumeSpecName "kube-api-access-2mccw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.539099 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0460ba04-08aa-4afe-9125-f1eb105161a3" (UID: "0460ba04-08aa-4afe-9125-f1eb105161a3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.602445 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.602482 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mccw\" (UniqueName: \"kubernetes.io/projected/0460ba04-08aa-4afe-9125-f1eb105161a3-kube-api-access-2mccw\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:54 crc kubenswrapper[4940]: I1126 08:46:54.602496 4940 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0460ba04-08aa-4afe-9125-f1eb105161a3-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.074342 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-54ngd" event={"ID":"0460ba04-08aa-4afe-9125-f1eb105161a3","Type":"ContainerDied","Data":"7dc206c8f89e124e804e8452802d7a5cd2fe2f725980b087c3edf7f424f48071"} Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.074616 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7dc206c8f89e124e804e8452802d7a5cd2fe2f725980b087c3edf7f424f48071" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.074390 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-54ngd" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.397448 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-dddb6ff69-6kv98"] Nov 26 08:46:55 crc kubenswrapper[4940]: E1126 08:46:55.397895 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0460ba04-08aa-4afe-9125-f1eb105161a3" containerName="barbican-db-sync" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.397914 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0460ba04-08aa-4afe-9125-f1eb105161a3" containerName="barbican-db-sync" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.398182 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0460ba04-08aa-4afe-9125-f1eb105161a3" containerName="barbican-db-sync" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.399371 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.405560 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.405706 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.405818 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-znqnv" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.416566 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-dddb6ff69-6kv98"] Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.424022 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-f4477dc54-7qvj8"] Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.425512 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.430761 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.456760 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-f4477dc54-7qvj8"] Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.469419 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84cdc5c6c-h99bn"] Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.471005 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.485816 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84cdc5c6c-h99bn"] Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.525640 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-config-data-custom\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.525717 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcs8x\" (UniqueName: \"kubernetes.io/projected/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-kube-api-access-fcs8x\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.525789 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-logs\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.526829 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-config-data\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.526879 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-combined-ca-bundle\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.586530 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-649595bd6-sghk8"] Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.588322 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.590116 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.596250 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-649595bd6-sghk8"] Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628236 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcs8x\" (UniqueName: \"kubernetes.io/projected/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-kube-api-access-fcs8x\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628313 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-config-data-custom\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628336 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-config\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628360 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-nb\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628391 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kr9fg\" (UniqueName: \"kubernetes.io/projected/4066a907-0a22-41a0-bcda-727d4b7cad23-kube-api-access-kr9fg\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-logs\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628440 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-config-data\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628498 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-dns-svc\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: 
\"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628569 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-combined-ca-bundle\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628600 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-config-data\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628624 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-combined-ca-bundle\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628651 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-config-data\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628678 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-logs\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628713 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpwtm\" (UniqueName: \"kubernetes.io/projected/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-kube-api-access-jpwtm\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628738 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-combined-ca-bundle\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628752 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-logs\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628780 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/4066a907-0a22-41a0-bcda-727d4b7cad23-logs\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628808 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gn788\" (UniqueName: \"kubernetes.io/projected/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-kube-api-access-gn788\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628832 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-config-data-custom\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628870 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-sb\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.628892 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-config-data-custom\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.634984 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-config-data-custom\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.635859 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-config-data\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.636734 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-combined-ca-bundle\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.658625 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcs8x\" (UniqueName: \"kubernetes.io/projected/55c1f084-6852-42a7-bbe2-9d9f1ec146dd-kube-api-access-fcs8x\") pod \"barbican-worker-dddb6ff69-6kv98\" (UID: \"55c1f084-6852-42a7-bbe2-9d9f1ec146dd\") " pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.725213 4940 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/barbican-worker-dddb6ff69-6kv98" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.729960 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-config-data\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730004 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-dns-svc\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730032 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-combined-ca-bundle\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730076 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-config-data\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730097 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-logs\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730127 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpwtm\" (UniqueName: \"kubernetes.io/projected/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-kube-api-access-jpwtm\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730147 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-combined-ca-bundle\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730175 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4066a907-0a22-41a0-bcda-727d4b7cad23-logs\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730193 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gn788\" (UniqueName: \"kubernetes.io/projected/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-kube-api-access-gn788\") pod \"barbican-api-649595bd6-sghk8\" (UID: 
\"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730224 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-sb\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730241 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-config-data-custom\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730269 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-config-data-custom\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730290 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-config\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730318 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-nb\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.730360 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kr9fg\" (UniqueName: \"kubernetes.io/projected/4066a907-0a22-41a0-bcda-727d4b7cad23-kube-api-access-kr9fg\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.731774 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-logs\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.731905 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-dns-svc\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.732338 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4066a907-0a22-41a0-bcda-727d4b7cad23-logs\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " 
pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.732394 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-config\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.732453 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-nb\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.736657 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-sb\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.738169 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-combined-ca-bundle\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.739977 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-config-data\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.741963 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-config-data-custom\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.749414 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-config-data-custom\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.750478 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-combined-ca-bundle\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.753462 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kr9fg\" (UniqueName: \"kubernetes.io/projected/4066a907-0a22-41a0-bcda-727d4b7cad23-kube-api-access-kr9fg\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " 
pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.753701 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4066a907-0a22-41a0-bcda-727d4b7cad23-config-data\") pod \"barbican-keystone-listener-f4477dc54-7qvj8\" (UID: \"4066a907-0a22-41a0-bcda-727d4b7cad23\") " pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.754736 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpwtm\" (UniqueName: \"kubernetes.io/projected/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-kube-api-access-jpwtm\") pod \"dnsmasq-dns-84cdc5c6c-h99bn\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") " pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.755144 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gn788\" (UniqueName: \"kubernetes.io/projected/8c5bf175-86a7-4ea0-854e-2f2751dcd74f-kube-api-access-gn788\") pod \"barbican-api-649595bd6-sghk8\" (UID: \"8c5bf175-86a7-4ea0-854e-2f2751dcd74f\") " pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.790291 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:55 crc kubenswrapper[4940]: I1126 08:46:55.902587 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:56 crc kubenswrapper[4940]: I1126 08:46:56.041621 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" Nov 26 08:46:56 crc kubenswrapper[4940]: I1126 08:46:56.182112 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-649595bd6-sghk8"] Nov 26 08:46:56 crc kubenswrapper[4940]: I1126 08:46:56.196751 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-dddb6ff69-6kv98"] Nov 26 08:46:56 crc kubenswrapper[4940]: I1126 08:46:56.267411 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84cdc5c6c-h99bn"] Nov 26 08:46:56 crc kubenswrapper[4940]: W1126 08:46:56.275335 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4fd24aa_3421_4c2c_b5e2_e6401483e4b1.slice/crio-f5e41d3a1a060edb8e8809b455f07abfe66518b8ef63071a4d097094bf077201 WatchSource:0}: Error finding container f5e41d3a1a060edb8e8809b455f07abfe66518b8ef63071a4d097094bf077201: Status 404 returned error can't find the container with id f5e41d3a1a060edb8e8809b455f07abfe66518b8ef63071a4d097094bf077201 Nov 26 08:46:56 crc kubenswrapper[4940]: I1126 08:46:56.473231 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-f4477dc54-7qvj8"] Nov 26 08:46:56 crc kubenswrapper[4940]: W1126 08:46:56.490229 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4066a907_0a22_41a0_bcda_727d4b7cad23.slice/crio-6e0dcba2cf6fde935f4233df20b1ae1ba84ecc8e206349577bc1e3d4d0d246ca WatchSource:0}: Error finding container 6e0dcba2cf6fde935f4233df20b1ae1ba84ecc8e206349577bc1e3d4d0d246ca: Status 404 returned error can't find the container with id 
6e0dcba2cf6fde935f4233df20b1ae1ba84ecc8e206349577bc1e3d4d0d246ca Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.100022 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-dddb6ff69-6kv98" event={"ID":"55c1f084-6852-42a7-bbe2-9d9f1ec146dd","Type":"ContainerStarted","Data":"6d977a4484c7c6e51ce014cae8dc512ab13d37973259227bbce06af44b1e9157"} Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.102937 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-649595bd6-sghk8" event={"ID":"8c5bf175-86a7-4ea0-854e-2f2751dcd74f","Type":"ContainerStarted","Data":"3c81d2c768496c8b0d2c329f12c395df975638f3be49c223845ef08ffed69301"} Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.106494 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-649595bd6-sghk8" event={"ID":"8c5bf175-86a7-4ea0-854e-2f2751dcd74f","Type":"ContainerStarted","Data":"c8c75ff63cc88ed85d7e0fa31471639adaaf1ed4051279a009962c4042d3180c"} Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.106527 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-649595bd6-sghk8" event={"ID":"8c5bf175-86a7-4ea0-854e-2f2751dcd74f","Type":"ContainerStarted","Data":"df328a81952e91996c05341a3a6eadb2b095a9a4f4500c2707537bfb8c9c8d15"} Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.107732 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.107758 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.111504 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" event={"ID":"4066a907-0a22-41a0-bcda-727d4b7cad23","Type":"ContainerStarted","Data":"6e0dcba2cf6fde935f4233df20b1ae1ba84ecc8e206349577bc1e3d4d0d246ca"} Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.113163 4940 generic.go:334] "Generic (PLEG): container finished" podID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" containerID="2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73" exitCode=0 Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.113192 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" event={"ID":"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1","Type":"ContainerDied","Data":"2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73"} Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.113208 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" event={"ID":"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1","Type":"ContainerStarted","Data":"f5e41d3a1a060edb8e8809b455f07abfe66518b8ef63071a4d097094bf077201"} Nov 26 08:46:57 crc kubenswrapper[4940]: I1126 08:46:57.132555 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-649595bd6-sghk8" podStartSLOduration=2.132535158 podStartE2EDuration="2.132535158s" podCreationTimestamp="2025-11-26 08:46:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:46:57.132390054 +0000 UTC m=+6718.652531693" watchObservedRunningTime="2025-11-26 08:46:57.132535158 +0000 UTC m=+6718.652676777" Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.125408 4940 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-dddb6ff69-6kv98" event={"ID":"55c1f084-6852-42a7-bbe2-9d9f1ec146dd","Type":"ContainerStarted","Data":"1bf169f4ac53278dc74193137b45ef6a82934c1f2129e7ce4fce44f9fe219736"} Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.127856 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-dddb6ff69-6kv98" event={"ID":"55c1f084-6852-42a7-bbe2-9d9f1ec146dd","Type":"ContainerStarted","Data":"8feed6302a497dbf1353076bb35f3bd4898f8f1e4bb21de367273e4ab31577ee"} Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.128538 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" event={"ID":"4066a907-0a22-41a0-bcda-727d4b7cad23","Type":"ContainerStarted","Data":"0e82425eb1d00ed5b0c2b19d8d399cbb0b7aa2928d720010dfa6953860351c63"} Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.128633 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" event={"ID":"4066a907-0a22-41a0-bcda-727d4b7cad23","Type":"ContainerStarted","Data":"25a19a77c481be645b3d531da03e343389aff0c1996a95d2c89d120493e05f37"} Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.130616 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" event={"ID":"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1","Type":"ContainerStarted","Data":"ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71"} Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.130867 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.171590 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" podStartSLOduration=3.171572724 podStartE2EDuration="3.171572724s" podCreationTimestamp="2025-11-26 08:46:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:46:58.166866884 +0000 UTC m=+6719.687008513" watchObservedRunningTime="2025-11-26 08:46:58.171572724 +0000 UTC m=+6719.691714343" Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.175625 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-dddb6ff69-6kv98" podStartSLOduration=1.872881013 podStartE2EDuration="3.175608322s" podCreationTimestamp="2025-11-26 08:46:55 +0000 UTC" firstStartedPulling="2025-11-26 08:46:56.219676315 +0000 UTC m=+6717.739817934" lastFinishedPulling="2025-11-26 08:46:57.522403624 +0000 UTC m=+6719.042545243" observedRunningTime="2025-11-26 08:46:58.143668787 +0000 UTC m=+6719.663810406" watchObservedRunningTime="2025-11-26 08:46:58.175608322 +0000 UTC m=+6719.695749941" Nov 26 08:46:58 crc kubenswrapper[4940]: I1126 08:46:58.182453 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-f4477dc54-7qvj8" podStartSLOduration=2.154922831 podStartE2EDuration="3.182437129s" podCreationTimestamp="2025-11-26 08:46:55 +0000 UTC" firstStartedPulling="2025-11-26 08:46:56.494859195 +0000 UTC m=+6718.015000804" lastFinishedPulling="2025-11-26 08:46:57.522373483 +0000 UTC m=+6719.042515102" observedRunningTime="2025-11-26 08:46:58.180962973 +0000 UTC m=+6719.701104602" watchObservedRunningTime="2025-11-26 08:46:58.182437129 +0000 UTC m=+6719.702578748" Nov 26 08:46:59 
crc kubenswrapper[4940]: I1126 08:46:59.174212 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:46:59 crc kubenswrapper[4940]: E1126 08:46:59.174645 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:47:05 crc kubenswrapper[4940]: I1126 08:47:05.792172 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" Nov 26 08:47:05 crc kubenswrapper[4940]: I1126 08:47:05.857705 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59c754c49f-q4t8x"] Nov 26 08:47:05 crc kubenswrapper[4940]: I1126 08:47:05.857983 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" podUID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" containerName="dnsmasq-dns" containerID="cri-o://1eba3a5110467e2f3effe39cf8d66a07760b92ab8329b5408d55acf514fa8b40" gracePeriod=10 Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.228942 4940 generic.go:334] "Generic (PLEG): container finished" podID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" containerID="1eba3a5110467e2f3effe39cf8d66a07760b92ab8329b5408d55acf514fa8b40" exitCode=0 Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.229217 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" event={"ID":"757dd3ba-00d6-4226-bfc7-f3e18531acd0","Type":"ContainerDied","Data":"1eba3a5110467e2f3effe39cf8d66a07760b92ab8329b5408d55acf514fa8b40"} Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.313689 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.414932 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-config\") pod \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.414989 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-dns-svc\") pod \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.415028 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-sb\") pod \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.415158 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-nb\") pod \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.415212 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tdsb\" (UniqueName: \"kubernetes.io/projected/757dd3ba-00d6-4226-bfc7-f3e18531acd0-kube-api-access-9tdsb\") pod \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\" (UID: \"757dd3ba-00d6-4226-bfc7-f3e18531acd0\") " Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.437501 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/757dd3ba-00d6-4226-bfc7-f3e18531acd0-kube-api-access-9tdsb" (OuterVolumeSpecName: "kube-api-access-9tdsb") pod "757dd3ba-00d6-4226-bfc7-f3e18531acd0" (UID: "757dd3ba-00d6-4226-bfc7-f3e18531acd0"). InnerVolumeSpecName "kube-api-access-9tdsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.460109 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "757dd3ba-00d6-4226-bfc7-f3e18531acd0" (UID: "757dd3ba-00d6-4226-bfc7-f3e18531acd0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.461093 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "757dd3ba-00d6-4226-bfc7-f3e18531acd0" (UID: "757dd3ba-00d6-4226-bfc7-f3e18531acd0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.462429 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-config" (OuterVolumeSpecName: "config") pod "757dd3ba-00d6-4226-bfc7-f3e18531acd0" (UID: "757dd3ba-00d6-4226-bfc7-f3e18531acd0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.463880 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "757dd3ba-00d6-4226-bfc7-f3e18531acd0" (UID: "757dd3ba-00d6-4226-bfc7-f3e18531acd0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.517200 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.517235 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tdsb\" (UniqueName: \"kubernetes.io/projected/757dd3ba-00d6-4226-bfc7-f3e18531acd0-kube-api-access-9tdsb\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.517247 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.517292 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:06 crc kubenswrapper[4940]: I1126 08:47:06.517307 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/757dd3ba-00d6-4226-bfc7-f3e18531acd0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:07 crc kubenswrapper[4940]: I1126 08:47:07.261928 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" event={"ID":"757dd3ba-00d6-4226-bfc7-f3e18531acd0","Type":"ContainerDied","Data":"91c11a6fae5cc122e844a53305773e83e6b26ab99be504bc83ff5c66893e7113"} Nov 26 08:47:07 crc kubenswrapper[4940]: I1126 08:47:07.262281 4940 scope.go:117] "RemoveContainer" containerID="1eba3a5110467e2f3effe39cf8d66a07760b92ab8329b5408d55acf514fa8b40" Nov 26 08:47:07 crc kubenswrapper[4940]: I1126 08:47:07.262417 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59c754c49f-q4t8x" Nov 26 08:47:07 crc kubenswrapper[4940]: I1126 08:47:07.291055 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59c754c49f-q4t8x"] Nov 26 08:47:07 crc kubenswrapper[4940]: I1126 08:47:07.291667 4940 scope.go:117] "RemoveContainer" containerID="75a7e01d4e289311916ae224710632aedbd489f857a84d7cf7f01091affb6630" Nov 26 08:47:07 crc kubenswrapper[4940]: I1126 08:47:07.303272 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59c754c49f-q4t8x"] Nov 26 08:47:07 crc kubenswrapper[4940]: I1126 08:47:07.337146 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:47:07 crc kubenswrapper[4940]: I1126 08:47:07.393489 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-649595bd6-sghk8" Nov 26 08:47:09 crc kubenswrapper[4940]: I1126 08:47:09.175877 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" path="/var/lib/kubelet/pods/757dd3ba-00d6-4226-bfc7-f3e18531acd0/volumes" Nov 26 08:47:10 crc kubenswrapper[4940]: I1126 08:47:10.165669 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:47:10 crc kubenswrapper[4940]: E1126 08:47:10.166489 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.713101 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-n9lc2"] Nov 26 08:47:13 crc kubenswrapper[4940]: E1126 08:47:13.713987 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" containerName="dnsmasq-dns" Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.714004 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" containerName="dnsmasq-dns" Nov 26 08:47:13 crc kubenswrapper[4940]: E1126 08:47:13.714024 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" containerName="init" Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.714031 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" containerName="init" Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.714252 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="757dd3ba-00d6-4226-bfc7-f3e18531acd0" containerName="dnsmasq-dns" Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.714991 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.725674 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-n9lc2"]
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.852724 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bww6\" (UniqueName: \"kubernetes.io/projected/fe133454-afe8-4208-ba1a-86f87d1a0837-kube-api-access-2bww6\") pod \"neutron-db-create-n9lc2\" (UID: \"fe133454-afe8-4208-ba1a-86f87d1a0837\") " pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.852923 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe133454-afe8-4208-ba1a-86f87d1a0837-operator-scripts\") pod \"neutron-db-create-n9lc2\" (UID: \"fe133454-afe8-4208-ba1a-86f87d1a0837\") " pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.925554 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-47c1-account-create-update-xqm5m"]
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.928055 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.930532 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.936578 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-47c1-account-create-update-xqm5m"]
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.956221 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe133454-afe8-4208-ba1a-86f87d1a0837-operator-scripts\") pod \"neutron-db-create-n9lc2\" (UID: \"fe133454-afe8-4208-ba1a-86f87d1a0837\") " pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.956302 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bww6\" (UniqueName: \"kubernetes.io/projected/fe133454-afe8-4208-ba1a-86f87d1a0837-kube-api-access-2bww6\") pod \"neutron-db-create-n9lc2\" (UID: \"fe133454-afe8-4208-ba1a-86f87d1a0837\") " pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.960294 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe133454-afe8-4208-ba1a-86f87d1a0837-operator-scripts\") pod \"neutron-db-create-n9lc2\" (UID: \"fe133454-afe8-4208-ba1a-86f87d1a0837\") " pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:13 crc kubenswrapper[4940]: I1126 08:47:13.978847 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bww6\" (UniqueName: \"kubernetes.io/projected/fe133454-afe8-4208-ba1a-86f87d1a0837-kube-api-access-2bww6\") pod \"neutron-db-create-n9lc2\" (UID: \"fe133454-afe8-4208-ba1a-86f87d1a0837\") " pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.042678 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.058107 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf52k\" (UniqueName: \"kubernetes.io/projected/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-kube-api-access-nf52k\") pod \"neutron-47c1-account-create-update-xqm5m\" (UID: \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\") " pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.058269 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-operator-scripts\") pod \"neutron-47c1-account-create-update-xqm5m\" (UID: \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\") " pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.159778 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-operator-scripts\") pod \"neutron-47c1-account-create-update-xqm5m\" (UID: \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\") " pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.160367 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf52k\" (UniqueName: \"kubernetes.io/projected/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-kube-api-access-nf52k\") pod \"neutron-47c1-account-create-update-xqm5m\" (UID: \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\") " pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.160726 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-operator-scripts\") pod \"neutron-47c1-account-create-update-xqm5m\" (UID: \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\") " pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.178410 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf52k\" (UniqueName: \"kubernetes.io/projected/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-kube-api-access-nf52k\") pod \"neutron-47c1-account-create-update-xqm5m\" (UID: \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\") " pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.247175 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.287697 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-n9lc2"]
Nov 26 08:47:14 crc kubenswrapper[4940]: W1126 08:47:14.291820 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe133454_afe8_4208_ba1a_86f87d1a0837.slice/crio-931b2ca6ba6642d81a7cee3dae0e33a17ae70302835335a06a71754c5df0a194 WatchSource:0}: Error finding container 931b2ca6ba6642d81a7cee3dae0e33a17ae70302835335a06a71754c5df0a194: Status 404 returned error can't find the container with id 931b2ca6ba6642d81a7cee3dae0e33a17ae70302835335a06a71754c5df0a194
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.328323 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-n9lc2" event={"ID":"fe133454-afe8-4208-ba1a-86f87d1a0837","Type":"ContainerStarted","Data":"931b2ca6ba6642d81a7cee3dae0e33a17ae70302835335a06a71754c5df0a194"}
Nov 26 08:47:14 crc kubenswrapper[4940]: I1126 08:47:14.659559 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-47c1-account-create-update-xqm5m"]
Nov 26 08:47:14 crc kubenswrapper[4940]: W1126 08:47:14.686971 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a9e3e01_69f3_4d3c_8586_2f70ca4083d1.slice/crio-c74a8ae2562bed85aeb9464af0f4d65bbb92cbade628791dc43e5d28001a93a4 WatchSource:0}: Error finding container c74a8ae2562bed85aeb9464af0f4d65bbb92cbade628791dc43e5d28001a93a4: Status 404 returned error can't find the container with id c74a8ae2562bed85aeb9464af0f4d65bbb92cbade628791dc43e5d28001a93a4
Nov 26 08:47:15 crc kubenswrapper[4940]: I1126 08:47:15.339525 4940 generic.go:334] "Generic (PLEG): container finished" podID="7a9e3e01-69f3-4d3c-8586-2f70ca4083d1" containerID="969d72976e6ab72bf75e90b8ea97f84d0d5cb46ed5eb5d3a5b241bd66f66cc3a" exitCode=0
Nov 26 08:47:15 crc kubenswrapper[4940]: I1126 08:47:15.339637 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-47c1-account-create-update-xqm5m" event={"ID":"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1","Type":"ContainerDied","Data":"969d72976e6ab72bf75e90b8ea97f84d0d5cb46ed5eb5d3a5b241bd66f66cc3a"}
Nov 26 08:47:15 crc kubenswrapper[4940]: I1126 08:47:15.340008 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-47c1-account-create-update-xqm5m" event={"ID":"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1","Type":"ContainerStarted","Data":"c74a8ae2562bed85aeb9464af0f4d65bbb92cbade628791dc43e5d28001a93a4"}
Nov 26 08:47:15 crc kubenswrapper[4940]: I1126 08:47:15.343693 4940 generic.go:334] "Generic (PLEG): container finished" podID="fe133454-afe8-4208-ba1a-86f87d1a0837" containerID="4918a2616cf200fcc9466f791913a5a17664b73fbdc23a0e59115e97ae89ebcf" exitCode=0
Nov 26 08:47:15 crc kubenswrapper[4940]: I1126 08:47:15.343777 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-n9lc2" event={"ID":"fe133454-afe8-4208-ba1a-86f87d1a0837","Type":"ContainerDied","Data":"4918a2616cf200fcc9466f791913a5a17664b73fbdc23a0e59115e97ae89ebcf"}
Nov 26 08:47:16 crc kubenswrapper[4940]: I1126 08:47:16.850642 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:16 crc kubenswrapper[4940]: I1126 08:47:16.859070 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.009218 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nf52k\" (UniqueName: \"kubernetes.io/projected/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-kube-api-access-nf52k\") pod \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\" (UID: \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\") "
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.009496 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe133454-afe8-4208-ba1a-86f87d1a0837-operator-scripts\") pod \"fe133454-afe8-4208-ba1a-86f87d1a0837\" (UID: \"fe133454-afe8-4208-ba1a-86f87d1a0837\") "
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.009531 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-operator-scripts\") pod \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\" (UID: \"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1\") "
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.009563 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bww6\" (UniqueName: \"kubernetes.io/projected/fe133454-afe8-4208-ba1a-86f87d1a0837-kube-api-access-2bww6\") pod \"fe133454-afe8-4208-ba1a-86f87d1a0837\" (UID: \"fe133454-afe8-4208-ba1a-86f87d1a0837\") "
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.010260 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7a9e3e01-69f3-4d3c-8586-2f70ca4083d1" (UID: "7a9e3e01-69f3-4d3c-8586-2f70ca4083d1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.010306 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe133454-afe8-4208-ba1a-86f87d1a0837-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fe133454-afe8-4208-ba1a-86f87d1a0837" (UID: "fe133454-afe8-4208-ba1a-86f87d1a0837"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.014643 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-kube-api-access-nf52k" (OuterVolumeSpecName: "kube-api-access-nf52k") pod "7a9e3e01-69f3-4d3c-8586-2f70ca4083d1" (UID: "7a9e3e01-69f3-4d3c-8586-2f70ca4083d1"). InnerVolumeSpecName "kube-api-access-nf52k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.015689 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe133454-afe8-4208-ba1a-86f87d1a0837-kube-api-access-2bww6" (OuterVolumeSpecName: "kube-api-access-2bww6") pod "fe133454-afe8-4208-ba1a-86f87d1a0837" (UID: "fe133454-afe8-4208-ba1a-86f87d1a0837"). InnerVolumeSpecName "kube-api-access-2bww6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.111181 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nf52k\" (UniqueName: \"kubernetes.io/projected/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-kube-api-access-nf52k\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.111211 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe133454-afe8-4208-ba1a-86f87d1a0837-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.111220 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.111229 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bww6\" (UniqueName: \"kubernetes.io/projected/fe133454-afe8-4208-ba1a-86f87d1a0837-kube-api-access-2bww6\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.389241 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-47c1-account-create-update-xqm5m" event={"ID":"7a9e3e01-69f3-4d3c-8586-2f70ca4083d1","Type":"ContainerDied","Data":"c74a8ae2562bed85aeb9464af0f4d65bbb92cbade628791dc43e5d28001a93a4"}
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.389302 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c74a8ae2562bed85aeb9464af0f4d65bbb92cbade628791dc43e5d28001a93a4"
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.389314 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-47c1-account-create-update-xqm5m"
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.391172 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-n9lc2" event={"ID":"fe133454-afe8-4208-ba1a-86f87d1a0837","Type":"ContainerDied","Data":"931b2ca6ba6642d81a7cee3dae0e33a17ae70302835335a06a71754c5df0a194"}
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.391251 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="931b2ca6ba6642d81a7cee3dae0e33a17ae70302835335a06a71754c5df0a194"
Nov 26 08:47:17 crc kubenswrapper[4940]: I1126 08:47:17.391205 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-n9lc2"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.161868 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-z78qf"]
Nov 26 08:47:19 crc kubenswrapper[4940]: E1126 08:47:19.162641 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe133454-afe8-4208-ba1a-86f87d1a0837" containerName="mariadb-database-create"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.162659 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe133454-afe8-4208-ba1a-86f87d1a0837" containerName="mariadb-database-create"
Nov 26 08:47:19 crc kubenswrapper[4940]: E1126 08:47:19.162673 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a9e3e01-69f3-4d3c-8586-2f70ca4083d1" containerName="mariadb-account-create-update"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.162681 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a9e3e01-69f3-4d3c-8586-2f70ca4083d1" containerName="mariadb-account-create-update"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.162881 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe133454-afe8-4208-ba1a-86f87d1a0837" containerName="mariadb-database-create"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.162913 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a9e3e01-69f3-4d3c-8586-2f70ca4083d1" containerName="mariadb-account-create-update"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.163612 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.165574 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.165687 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-5h7lw"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.165739 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.185079 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-z78qf"]
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.247742 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-combined-ca-bundle\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.247847 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zl7c\" (UniqueName: \"kubernetes.io/projected/a3d251fc-2e47-4381-bb73-05ff1a5753b2-kube-api-access-5zl7c\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.247924 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-config\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.349618 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-combined-ca-bundle\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.349676 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zl7c\" (UniqueName: \"kubernetes.io/projected/a3d251fc-2e47-4381-bb73-05ff1a5753b2-kube-api-access-5zl7c\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.349729 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-config\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.356445 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-config\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.356527 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-combined-ca-bundle\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.371352 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zl7c\" (UniqueName: \"kubernetes.io/projected/a3d251fc-2e47-4381-bb73-05ff1a5753b2-kube-api-access-5zl7c\") pod \"neutron-db-sync-z78qf\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") " pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.486449 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:19 crc kubenswrapper[4940]: I1126 08:47:19.949312 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-z78qf"]
Nov 26 08:47:19 crc kubenswrapper[4940]: W1126 08:47:19.955251 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3d251fc_2e47_4381_bb73_05ff1a5753b2.slice/crio-05bdeb9d2389e3b8146e325507102ddff168846bd0d7416b56c899bc4d6368d1 WatchSource:0}: Error finding container 05bdeb9d2389e3b8146e325507102ddff168846bd0d7416b56c899bc4d6368d1: Status 404 returned error can't find the container with id 05bdeb9d2389e3b8146e325507102ddff168846bd0d7416b56c899bc4d6368d1
Nov 26 08:47:20 crc kubenswrapper[4940]: I1126 08:47:20.417859 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-z78qf" event={"ID":"a3d251fc-2e47-4381-bb73-05ff1a5753b2","Type":"ContainerStarted","Data":"a9650a5084fadb53159d17654cf524e8da140a51c8978bdb9a87f8f6a2d6339f"}
Nov 26 08:47:20 crc kubenswrapper[4940]: I1126 08:47:20.417919 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-z78qf" event={"ID":"a3d251fc-2e47-4381-bb73-05ff1a5753b2","Type":"ContainerStarted","Data":"05bdeb9d2389e3b8146e325507102ddff168846bd0d7416b56c899bc4d6368d1"}
Nov 26 08:47:20 crc kubenswrapper[4940]: I1126 08:47:20.440656 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-z78qf" podStartSLOduration=1.4406331319999999 podStartE2EDuration="1.440633132s" podCreationTimestamp="2025-11-26 08:47:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:47:20.432745241 +0000 UTC m=+6741.952886900" watchObservedRunningTime="2025-11-26 08:47:20.440633132 +0000 UTC m=+6741.960774751"
Nov 26 08:47:22 crc kubenswrapper[4940]: I1126 08:47:22.166706 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"
Nov 26 08:47:22 crc kubenswrapper[4940]: E1126 08:47:22.167197 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:47:24 crc kubenswrapper[4940]: I1126 08:47:24.463922 4940 generic.go:334] "Generic (PLEG): container finished" podID="a3d251fc-2e47-4381-bb73-05ff1a5753b2" containerID="a9650a5084fadb53159d17654cf524e8da140a51c8978bdb9a87f8f6a2d6339f" exitCode=0
Nov 26 08:47:24 crc kubenswrapper[4940]: I1126 08:47:24.464094 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-z78qf" event={"ID":"a3d251fc-2e47-4381-bb73-05ff1a5753b2","Type":"ContainerDied","Data":"a9650a5084fadb53159d17654cf524e8da140a51c8978bdb9a87f8f6a2d6339f"}
Nov 26 08:47:25 crc kubenswrapper[4940]: I1126 08:47:25.818575 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:25 crc kubenswrapper[4940]: I1126 08:47:25.972075 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zl7c\" (UniqueName: \"kubernetes.io/projected/a3d251fc-2e47-4381-bb73-05ff1a5753b2-kube-api-access-5zl7c\") pod \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") "
Nov 26 08:47:25 crc kubenswrapper[4940]: I1126 08:47:25.972222 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-config\") pod \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") "
Nov 26 08:47:25 crc kubenswrapper[4940]: I1126 08:47:25.972316 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-combined-ca-bundle\") pod \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\" (UID: \"a3d251fc-2e47-4381-bb73-05ff1a5753b2\") "
Nov 26 08:47:25 crc kubenswrapper[4940]: I1126 08:47:25.977344 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3d251fc-2e47-4381-bb73-05ff1a5753b2-kube-api-access-5zl7c" (OuterVolumeSpecName: "kube-api-access-5zl7c") pod "a3d251fc-2e47-4381-bb73-05ff1a5753b2" (UID: "a3d251fc-2e47-4381-bb73-05ff1a5753b2"). InnerVolumeSpecName "kube-api-access-5zl7c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.001989 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-config" (OuterVolumeSpecName: "config") pod "a3d251fc-2e47-4381-bb73-05ff1a5753b2" (UID: "a3d251fc-2e47-4381-bb73-05ff1a5753b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.011569 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3d251fc-2e47-4381-bb73-05ff1a5753b2" (UID: "a3d251fc-2e47-4381-bb73-05ff1a5753b2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.073671 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.073731 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zl7c\" (UniqueName: \"kubernetes.io/projected/a3d251fc-2e47-4381-bb73-05ff1a5753b2-kube-api-access-5zl7c\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.073744 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a3d251fc-2e47-4381-bb73-05ff1a5753b2-config\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.486928 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-z78qf" event={"ID":"a3d251fc-2e47-4381-bb73-05ff1a5753b2","Type":"ContainerDied","Data":"05bdeb9d2389e3b8146e325507102ddff168846bd0d7416b56c899bc4d6368d1"}
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.486972 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05bdeb9d2389e3b8146e325507102ddff168846bd0d7416b56c899bc4d6368d1"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.487082 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-z78qf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.721849 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6564d966f9-rdpnp"]
Nov 26 08:47:26 crc kubenswrapper[4940]: E1126 08:47:26.722484 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3d251fc-2e47-4381-bb73-05ff1a5753b2" containerName="neutron-db-sync"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.722506 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3d251fc-2e47-4381-bb73-05ff1a5753b2" containerName="neutron-db-sync"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.722681 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3d251fc-2e47-4381-bb73-05ff1a5753b2" containerName="neutron-db-sync"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.723526 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.746480 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6564d966f9-rdpnp"]
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.787547 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dd68f5c47-c8tlf"]
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.788953 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.792749 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.792925 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.793532 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-5h7lw"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.806103 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dd68f5c47-c8tlf"]
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890155 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ctrq\" (UniqueName: \"kubernetes.io/projected/e9a5605c-a7bf-4d94-aab2-053385ccd488-kube-api-access-2ctrq\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890235 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-nb\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890286 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpzvr\" (UniqueName: \"kubernetes.io/projected/b849da81-4187-4d5e-9077-d6f3affd46f0-kube-api-access-mpzvr\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890311 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-combined-ca-bundle\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890455 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-config\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890500 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-dns-svc\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890525 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-httpd-config\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890593 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-config\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.890726 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-sb\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.992529 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-sb\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.992598 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ctrq\" (UniqueName: \"kubernetes.io/projected/e9a5605c-a7bf-4d94-aab2-053385ccd488-kube-api-access-2ctrq\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.992633 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-nb\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.992663 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpzvr\" (UniqueName: \"kubernetes.io/projected/b849da81-4187-4d5e-9077-d6f3affd46f0-kube-api-access-mpzvr\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.992684 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-combined-ca-bundle\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.992735 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-config\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.992761 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-dns-svc\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.992781 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-httpd-config\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.993555 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-config\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.993595 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-sb\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.993610 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-nb\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.993720 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-dns-svc\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.993833 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-config\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.997776 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-config\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:26 crc kubenswrapper[4940]: I1126 08:47:26.998669 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-combined-ca-bundle\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:27 crc kubenswrapper[4940]: I1126 08:47:27.003884 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e9a5605c-a7bf-4d94-aab2-053385ccd488-httpd-config\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:27 crc kubenswrapper[4940]: I1126 08:47:27.020281 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ctrq\" (UniqueName: \"kubernetes.io/projected/e9a5605c-a7bf-4d94-aab2-053385ccd488-kube-api-access-2ctrq\") pod \"neutron-dd68f5c47-c8tlf\" (UID: \"e9a5605c-a7bf-4d94-aab2-053385ccd488\") " pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:27 crc kubenswrapper[4940]: I1126 08:47:27.023992 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpzvr\" (UniqueName: \"kubernetes.io/projected/b849da81-4187-4d5e-9077-d6f3affd46f0-kube-api-access-mpzvr\") pod \"dnsmasq-dns-6564d966f9-rdpnp\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:27 crc kubenswrapper[4940]: I1126 08:47:27.058611 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:27 crc kubenswrapper[4940]: I1126 08:47:27.115092 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:27 crc kubenswrapper[4940]: I1126 08:47:27.724339 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6564d966f9-rdpnp"]
Nov 26 08:47:27 crc kubenswrapper[4940]: W1126 08:47:27.749443 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9a5605c_a7bf_4d94_aab2_053385ccd488.slice/crio-998642ee54b57e63f46ef6545a58b4e3172a98db9878d60052b0bed9a71aca2f WatchSource:0}: Error finding container 998642ee54b57e63f46ef6545a58b4e3172a98db9878d60052b0bed9a71aca2f: Status 404 returned error can't find the container with id 998642ee54b57e63f46ef6545a58b4e3172a98db9878d60052b0bed9a71aca2f
Nov 26 08:47:27 crc kubenswrapper[4940]: I1126 08:47:27.750979 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dd68f5c47-c8tlf"]
Nov 26 08:47:28 crc kubenswrapper[4940]: I1126 08:47:28.503107 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dd68f5c47-c8tlf" event={"ID":"e9a5605c-a7bf-4d94-aab2-053385ccd488","Type":"ContainerStarted","Data":"49101264a4644f6072c822299f999a71295a1e4d4029f646f1abe96f2f73ab61"}
Nov 26 08:47:28 crc kubenswrapper[4940]: I1126 08:47:28.503613 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:28 crc kubenswrapper[4940]: I1126 08:47:28.503626 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dd68f5c47-c8tlf" event={"ID":"e9a5605c-a7bf-4d94-aab2-053385ccd488","Type":"ContainerStarted","Data":"5ac3b8b25793e9423fb1a793f17b9a61d8ae28914b552fce25ad164a7f998644"}
Nov 26 08:47:28 crc kubenswrapper[4940]: I1126 08:47:28.503637 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dd68f5c47-c8tlf" event={"ID":"e9a5605c-a7bf-4d94-aab2-053385ccd488","Type":"ContainerStarted","Data":"998642ee54b57e63f46ef6545a58b4e3172a98db9878d60052b0bed9a71aca2f"}
Nov 26 08:47:28 crc kubenswrapper[4940]: I1126 08:47:28.505392 4940 generic.go:334] "Generic (PLEG): container finished" podID="b849da81-4187-4d5e-9077-d6f3affd46f0" containerID="01ea5669a82f2c9ceb860a26d54c3e24f7d5083c7165f5a711cff997ca16ff6b" exitCode=0
Nov 26 08:47:28 crc kubenswrapper[4940]: I1126 08:47:28.505442 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp" event={"ID":"b849da81-4187-4d5e-9077-d6f3affd46f0","Type":"ContainerDied","Data":"01ea5669a82f2c9ceb860a26d54c3e24f7d5083c7165f5a711cff997ca16ff6b"}
Nov 26 08:47:28 crc kubenswrapper[4940]: I1126 08:47:28.505504 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp" event={"ID":"b849da81-4187-4d5e-9077-d6f3affd46f0","Type":"ContainerStarted","Data":"ac85a7325bf1fc7030dba8c63d01d268402e8058b351764cbad683c67533ab6d"}
Nov 26 08:47:28 crc kubenswrapper[4940]: I1126 08:47:28.531862 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dd68f5c47-c8tlf" podStartSLOduration=2.5318396720000003 podStartE2EDuration="2.531839672s" podCreationTimestamp="2025-11-26 08:47:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:47:28.526810632 +0000 UTC m=+6750.046952241" watchObservedRunningTime="2025-11-26 08:47:28.531839672 +0000 UTC m=+6750.051981311"
Nov 26 08:47:29 crc kubenswrapper[4940]: I1126 08:47:29.514492 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp" event={"ID":"b849da81-4187-4d5e-9077-d6f3affd46f0","Type":"ContainerStarted","Data":"c8992018d04e40e16e5d12c0c843c39118f802f8850f42cb6b9f571e550497f4"}
Nov 26 08:47:30 crc kubenswrapper[4940]: I1126 08:47:30.524761 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:33 crc kubenswrapper[4940]: I1126 08:47:33.165536 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"
Nov 26 08:47:33 crc kubenswrapper[4940]: E1126 08:47:33.166519 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.061234 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.091891 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp" podStartSLOduration=11.091872368 podStartE2EDuration="11.091872368s" podCreationTimestamp="2025-11-26 08:47:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:47:29.539544171 +0000 UTC m=+6751.059685790" watchObservedRunningTime="2025-11-26 08:47:37.091872368 +0000 UTC m=+6758.612013987"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.132140 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84cdc5c6c-h99bn"]
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.132694 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" podUID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" containerName="dnsmasq-dns" containerID="cri-o://ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71" gracePeriod=10
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.594819 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.612928 4940 generic.go:334] "Generic (PLEG): container finished" podID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" containerID="ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71" exitCode=0
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.612986 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" event={"ID":"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1","Type":"ContainerDied","Data":"ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71"}
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.613022 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn" event={"ID":"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1","Type":"ContainerDied","Data":"f5e41d3a1a060edb8e8809b455f07abfe66518b8ef63071a4d097094bf077201"}
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.613078 4940 scope.go:117] "RemoveContainer" containerID="ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.613248 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84cdc5c6c-h99bn"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.633750 4940 scope.go:117] "RemoveContainer" containerID="2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.672520 4940 scope.go:117] "RemoveContainer" containerID="ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71"
Nov 26 08:47:37 crc kubenswrapper[4940]: E1126 08:47:37.674513 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71\": container with ID starting with ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71 not found: ID does not exist" containerID="ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.674551 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71"} err="failed to get container status \"ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71\": rpc error: code = NotFound desc = could not find container \"ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71\": container with ID starting with ff53f7655b0a6339091e921e4475c9664f079496e5b7dd816997d47e1179ac71 not found: ID does not exist"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.674572 4940 scope.go:117] "RemoveContainer" containerID="2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73"
Nov 26 08:47:37 crc kubenswrapper[4940]: E1126 08:47:37.678868 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73\": container with ID starting with 2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73 not found: ID does not exist" containerID="2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.678906 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73"} err="failed to get container status \"2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73\": rpc error: code = NotFound desc = could not find container \"2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73\": container with ID starting with 2e9202e1e88e21b5a353b84e0e20ddf6485684036a425bdeab01aacb17d88d73 not found: ID does not exist"
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.691536 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpwtm\" (UniqueName: \"kubernetes.io/projected/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-kube-api-access-jpwtm\") pod \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") "
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.691599 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-nb\") pod \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") "
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.691669 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-dns-svc\") pod \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") "
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.691726 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-config\") pod \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") "
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.691812 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-sb\") pod \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\" (UID: \"a4fd24aa-3421-4c2c-b5e2-e6401483e4b1\") "
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.735249 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-kube-api-access-jpwtm" (OuterVolumeSpecName: "kube-api-access-jpwtm") pod "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" (UID: "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1"). InnerVolumeSpecName "kube-api-access-jpwtm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.793487 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpwtm\" (UniqueName: \"kubernetes.io/projected/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-kube-api-access-jpwtm\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.797348 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-config" (OuterVolumeSpecName: "config") pod "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" (UID: "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.803420 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" (UID: "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.808127 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" (UID: "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.825068 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" (UID: "a4fd24aa-3421-4c2c-b5e2-e6401483e4b1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.895448 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.895485 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.895494 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.895502 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1-config\") on node \"crc\" DevicePath \"\""
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.943286 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84cdc5c6c-h99bn"]
Nov 26 08:47:37 crc kubenswrapper[4940]: I1126 08:47:37.950860 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84cdc5c6c-h99bn"]
Nov 26 08:47:39 crc kubenswrapper[4940]: I1126 08:47:39.176530 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" path="/var/lib/kubelet/pods/a4fd24aa-3421-4c2c-b5e2-e6401483e4b1/volumes"
Nov 26 08:47:45 crc kubenswrapper[4940]: I1126 08:47:45.165625 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"
Nov 26 08:47:45 crc kubenswrapper[4940]: E1126 08:47:45.166252 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:47:57 crc kubenswrapper[4940]: I1126 08:47:57.123081 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-dd68f5c47-c8tlf"
Nov 26 08:47:58 crc kubenswrapper[4940]: I1126 08:47:58.166253 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"
Nov 26 08:47:58 crc kubenswrapper[4940]: E1126 08:47:58.166851 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:48:04 crc kubenswrapper[4940]: I1126 08:48:04.996474 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-zqh8w"]
Nov 26 08:48:04 crc kubenswrapper[4940]: E1126 08:48:04.997577 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" containerName="init"
Nov 26 08:48:04 crc kubenswrapper[4940]: I1126 08:48:04.997594 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" containerName="init"
Nov 26 08:48:04 crc kubenswrapper[4940]: E1126 08:48:04.997612 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" containerName="dnsmasq-dns"
Nov 26 08:48:04 crc kubenswrapper[4940]: I1126 08:48:04.997620 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" containerName="dnsmasq-dns"
Nov 26 08:48:04 crc kubenswrapper[4940]: I1126 08:48:04.997887 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4fd24aa-3421-4c2c-b5e2-e6401483e4b1" containerName="dnsmasq-dns"
Nov 26 08:48:04 crc kubenswrapper[4940]: I1126 08:48:04.998609 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.007718 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-aca1-account-create-update-p795q"]
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.008996 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-aca1-account-create-update-p795q"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.017000 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-zqh8w"]
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.048440 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-operator-scripts\") pod \"glance-aca1-account-create-update-p795q\" (UID: \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\") " pod="openstack/glance-aca1-account-create-update-p795q"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.048740 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/612f623d-62d5-4722-b290-7d2ab5cb4795-operator-scripts\") pod \"glance-db-create-zqh8w\" (UID: \"612f623d-62d5-4722-b290-7d2ab5cb4795\") " pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.048780 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76dpd\" (UniqueName: \"kubernetes.io/projected/612f623d-62d5-4722-b290-7d2ab5cb4795-kube-api-access-76dpd\") pod \"glance-db-create-zqh8w\" (UID: \"612f623d-62d5-4722-b290-7d2ab5cb4795\") " pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.048887 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cs8zj\" (UniqueName: \"kubernetes.io/projected/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-kube-api-access-cs8zj\") pod \"glance-aca1-account-create-update-p795q\" (UID: \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\") " pod="openstack/glance-aca1-account-create-update-p795q"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.049633 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.071523 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-aca1-account-create-update-p795q"]
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.149461 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-operator-scripts\") pod \"glance-aca1-account-create-update-p795q\" (UID: \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\") " pod="openstack/glance-aca1-account-create-update-p795q"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.149596 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/612f623d-62d5-4722-b290-7d2ab5cb4795-operator-scripts\") pod \"glance-db-create-zqh8w\" (UID: \"612f623d-62d5-4722-b290-7d2ab5cb4795\") " pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.149622 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76dpd\" (UniqueName: \"kubernetes.io/projected/612f623d-62d5-4722-b290-7d2ab5cb4795-kube-api-access-76dpd\") pod \"glance-db-create-zqh8w\" (UID: \"612f623d-62d5-4722-b290-7d2ab5cb4795\") " pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.149688 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cs8zj\" (UniqueName: \"kubernetes.io/projected/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-kube-api-access-cs8zj\") pod \"glance-aca1-account-create-update-p795q\" (UID: \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\") " pod="openstack/glance-aca1-account-create-update-p795q"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.150283 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-operator-scripts\") pod \"glance-aca1-account-create-update-p795q\" (UID: \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\") " pod="openstack/glance-aca1-account-create-update-p795q"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.150649 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/612f623d-62d5-4722-b290-7d2ab5cb4795-operator-scripts\") pod \"glance-db-create-zqh8w\" (UID: \"612f623d-62d5-4722-b290-7d2ab5cb4795\") " pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.169315 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cs8zj\" (UniqueName: \"kubernetes.io/projected/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-kube-api-access-cs8zj\") pod \"glance-aca1-account-create-update-p795q\" (UID: \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\") " pod="openstack/glance-aca1-account-create-update-p795q"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.175451 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76dpd\" (UniqueName: \"kubernetes.io/projected/612f623d-62d5-4722-b290-7d2ab5cb4795-kube-api-access-76dpd\") pod \"glance-db-create-zqh8w\" (UID: \"612f623d-62d5-4722-b290-7d2ab5cb4795\") " pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.374383 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.387484 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-aca1-account-create-update-p795q"
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.804023 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-aca1-account-create-update-p795q"]
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.885779 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-aca1-account-create-update-p795q" event={"ID":"a7f2a933-405d-4a9b-892f-d2fff3a10bf6","Type":"ContainerStarted","Data":"670f455981ac5409516e9225d681f0318ca7de1b2c4088e0029340dd858e33ec"}
Nov 26 08:48:05 crc kubenswrapper[4940]: I1126 08:48:05.895605 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-zqh8w"]
Nov 26 08:48:05 crc kubenswrapper[4940]: W1126 08:48:05.907575 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod612f623d_62d5_4722_b290_7d2ab5cb4795.slice/crio-cb10b804ae61bc213cc676d3938f33e64ada3a8888cb48eafacbc0ea363ccb14 WatchSource:0}: Error finding container cb10b804ae61bc213cc676d3938f33e64ada3a8888cb48eafacbc0ea363ccb14: Status 404 returned error can't find the container with id cb10b804ae61bc213cc676d3938f33e64ada3a8888cb48eafacbc0ea363ccb14
Nov 26 08:48:06 crc kubenswrapper[4940]: I1126 08:48:06.905182 4940 generic.go:334] "Generic (PLEG): container finished" podID="612f623d-62d5-4722-b290-7d2ab5cb4795" containerID="5aa31e432606e2c15765302595b39ea553f09f834afdabc97d25da38d7694bec" exitCode=0
Nov 26 08:48:06 crc kubenswrapper[4940]: I1126 08:48:06.905690 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zqh8w" event={"ID":"612f623d-62d5-4722-b290-7d2ab5cb4795","Type":"ContainerDied","Data":"5aa31e432606e2c15765302595b39ea553f09f834afdabc97d25da38d7694bec"}
Nov 26 08:48:06 crc kubenswrapper[4940]: I1126 08:48:06.905754 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zqh8w" event={"ID":"612f623d-62d5-4722-b290-7d2ab5cb4795","Type":"ContainerStarted","Data":"cb10b804ae61bc213cc676d3938f33e64ada3a8888cb48eafacbc0ea363ccb14"}
Nov 26 08:48:06 crc kubenswrapper[4940]: I1126 08:48:06.908388 4940 generic.go:334] "Generic (PLEG): container finished" podID="a7f2a933-405d-4a9b-892f-d2fff3a10bf6" containerID="ee2ea4281e2b1df972f0ed2a2f2be7f68fca3ad07bd5a1706816358e021f7412" exitCode=0
Nov 26 08:48:06 crc kubenswrapper[4940]: I1126 08:48:06.908481 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-aca1-account-create-update-p795q" event={"ID":"a7f2a933-405d-4a9b-892f-d2fff3a10bf6","Type":"ContainerDied","Data":"ee2ea4281e2b1df972f0ed2a2f2be7f68fca3ad07bd5a1706816358e021f7412"}
Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.282022 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zqh8w"
Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.289346 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-aca1-account-create-update-p795q" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.426408 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76dpd\" (UniqueName: \"kubernetes.io/projected/612f623d-62d5-4722-b290-7d2ab5cb4795-kube-api-access-76dpd\") pod \"612f623d-62d5-4722-b290-7d2ab5cb4795\" (UID: \"612f623d-62d5-4722-b290-7d2ab5cb4795\") " Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.426493 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-operator-scripts\") pod \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\" (UID: \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\") " Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.426570 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cs8zj\" (UniqueName: \"kubernetes.io/projected/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-kube-api-access-cs8zj\") pod \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\" (UID: \"a7f2a933-405d-4a9b-892f-d2fff3a10bf6\") " Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.426592 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/612f623d-62d5-4722-b290-7d2ab5cb4795-operator-scripts\") pod \"612f623d-62d5-4722-b290-7d2ab5cb4795\" (UID: \"612f623d-62d5-4722-b290-7d2ab5cb4795\") " Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.427757 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/612f623d-62d5-4722-b290-7d2ab5cb4795-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "612f623d-62d5-4722-b290-7d2ab5cb4795" (UID: "612f623d-62d5-4722-b290-7d2ab5cb4795"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.427760 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a7f2a933-405d-4a9b-892f-d2fff3a10bf6" (UID: "a7f2a933-405d-4a9b-892f-d2fff3a10bf6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.432237 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/612f623d-62d5-4722-b290-7d2ab5cb4795-kube-api-access-76dpd" (OuterVolumeSpecName: "kube-api-access-76dpd") pod "612f623d-62d5-4722-b290-7d2ab5cb4795" (UID: "612f623d-62d5-4722-b290-7d2ab5cb4795"). InnerVolumeSpecName "kube-api-access-76dpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.433578 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-kube-api-access-cs8zj" (OuterVolumeSpecName: "kube-api-access-cs8zj") pod "a7f2a933-405d-4a9b-892f-d2fff3a10bf6" (UID: "a7f2a933-405d-4a9b-892f-d2fff3a10bf6"). InnerVolumeSpecName "kube-api-access-cs8zj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.528621 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76dpd\" (UniqueName: \"kubernetes.io/projected/612f623d-62d5-4722-b290-7d2ab5cb4795-kube-api-access-76dpd\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.528671 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.528683 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cs8zj\" (UniqueName: \"kubernetes.io/projected/a7f2a933-405d-4a9b-892f-d2fff3a10bf6-kube-api-access-cs8zj\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.528695 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/612f623d-62d5-4722-b290-7d2ab5cb4795-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.929749 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zqh8w" event={"ID":"612f623d-62d5-4722-b290-7d2ab5cb4795","Type":"ContainerDied","Data":"cb10b804ae61bc213cc676d3938f33e64ada3a8888cb48eafacbc0ea363ccb14"} Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.930186 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb10b804ae61bc213cc676d3938f33e64ada3a8888cb48eafacbc0ea363ccb14" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.929806 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zqh8w" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.931436 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-aca1-account-create-update-p795q" event={"ID":"a7f2a933-405d-4a9b-892f-d2fff3a10bf6","Type":"ContainerDied","Data":"670f455981ac5409516e9225d681f0318ca7de1b2c4088e0029340dd858e33ec"} Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.931456 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-aca1-account-create-update-p795q" Nov 26 08:48:08 crc kubenswrapper[4940]: I1126 08:48:08.931471 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="670f455981ac5409516e9225d681f0318ca7de1b2c4088e0029340dd858e33ec" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.238886 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-bdsxv"] Nov 26 08:48:10 crc kubenswrapper[4940]: E1126 08:48:10.242508 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7f2a933-405d-4a9b-892f-d2fff3a10bf6" containerName="mariadb-account-create-update" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.242538 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7f2a933-405d-4a9b-892f-d2fff3a10bf6" containerName="mariadb-account-create-update" Nov 26 08:48:10 crc kubenswrapper[4940]: E1126 08:48:10.242554 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="612f623d-62d5-4722-b290-7d2ab5cb4795" containerName="mariadb-database-create" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.242561 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="612f623d-62d5-4722-b290-7d2ab5cb4795" containerName="mariadb-database-create" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.242734 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7f2a933-405d-4a9b-892f-d2fff3a10bf6" containerName="mariadb-account-create-update" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.242752 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="612f623d-62d5-4722-b290-7d2ab5cb4795" containerName="mariadb-database-create" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.243516 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.246465 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6cn6v" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.246479 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.255229 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bdsxv"] Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.359986 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ln82\" (UniqueName: \"kubernetes.io/projected/da9627dd-e950-47ee-93a6-3d6f10627b5d-kube-api-access-4ln82\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.360063 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-db-sync-config-data\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.360275 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-config-data\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.360457 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-combined-ca-bundle\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.462329 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-combined-ca-bundle\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.462699 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ln82\" (UniqueName: \"kubernetes.io/projected/da9627dd-e950-47ee-93a6-3d6f10627b5d-kube-api-access-4ln82\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.462748 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-db-sync-config-data\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.462881 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-config-data\") pod 
\"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.467822 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-db-sync-config-data\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.468446 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-combined-ca-bundle\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.477155 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-config-data\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.488712 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ln82\" (UniqueName: \"kubernetes.io/projected/da9627dd-e950-47ee-93a6-3d6f10627b5d-kube-api-access-4ln82\") pod \"glance-db-sync-bdsxv\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:10 crc kubenswrapper[4940]: I1126 08:48:10.568427 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:11 crc kubenswrapper[4940]: I1126 08:48:11.058931 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bdsxv"] Nov 26 08:48:11 crc kubenswrapper[4940]: I1126 08:48:11.067949 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:48:11 crc kubenswrapper[4940]: I1126 08:48:11.959363 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bdsxv" event={"ID":"da9627dd-e950-47ee-93a6-3d6f10627b5d","Type":"ContainerStarted","Data":"165e60eb325c48b3dd380d3896afdb2e27f5ee7e879163fe9299f389ce1861b1"} Nov 26 08:48:12 crc kubenswrapper[4940]: I1126 08:48:12.165950 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:48:12 crc kubenswrapper[4940]: E1126 08:48:12.166236 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:48:26 crc kubenswrapper[4940]: I1126 08:48:26.166205 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:48:26 crc kubenswrapper[4940]: E1126 08:48:26.167165 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:48:27 crc kubenswrapper[4940]: I1126 08:48:27.095304 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bdsxv" event={"ID":"da9627dd-e950-47ee-93a6-3d6f10627b5d","Type":"ContainerStarted","Data":"bf9dd3dba79c071c7e7825c1be144a898ad7799b708b3f6d19a7cb61c4772639"} Nov 26 08:48:27 crc kubenswrapper[4940]: I1126 08:48:27.128222 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-bdsxv" podStartSLOduration=2.486605131 podStartE2EDuration="17.128202344s" podCreationTimestamp="2025-11-26 08:48:10 +0000 UTC" firstStartedPulling="2025-11-26 08:48:11.067706459 +0000 UTC m=+6792.587848078" lastFinishedPulling="2025-11-26 08:48:25.709303662 +0000 UTC m=+6807.229445291" observedRunningTime="2025-11-26 08:48:27.118438104 +0000 UTC m=+6808.638579723" watchObservedRunningTime="2025-11-26 08:48:27.128202344 +0000 UTC m=+6808.648343963" Nov 26 08:48:30 crc kubenswrapper[4940]: I1126 08:48:30.124570 4940 generic.go:334] "Generic (PLEG): container finished" podID="da9627dd-e950-47ee-93a6-3d6f10627b5d" containerID="bf9dd3dba79c071c7e7825c1be144a898ad7799b708b3f6d19a7cb61c4772639" exitCode=0 Nov 26 08:48:30 crc kubenswrapper[4940]: I1126 08:48:30.124674 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bdsxv" event={"ID":"da9627dd-e950-47ee-93a6-3d6f10627b5d","Type":"ContainerDied","Data":"bf9dd3dba79c071c7e7825c1be144a898ad7799b708b3f6d19a7cb61c4772639"} Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.609905 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.756446 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-config-data\") pod \"da9627dd-e950-47ee-93a6-3d6f10627b5d\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.757073 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-db-sync-config-data\") pod \"da9627dd-e950-47ee-93a6-3d6f10627b5d\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.757376 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-combined-ca-bundle\") pod \"da9627dd-e950-47ee-93a6-3d6f10627b5d\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.757624 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ln82\" (UniqueName: \"kubernetes.io/projected/da9627dd-e950-47ee-93a6-3d6f10627b5d-kube-api-access-4ln82\") pod \"da9627dd-e950-47ee-93a6-3d6f10627b5d\" (UID: \"da9627dd-e950-47ee-93a6-3d6f10627b5d\") " Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.763610 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da9627dd-e950-47ee-93a6-3d6f10627b5d-kube-api-access-4ln82" (OuterVolumeSpecName: "kube-api-access-4ln82") pod "da9627dd-e950-47ee-93a6-3d6f10627b5d" (UID: "da9627dd-e950-47ee-93a6-3d6f10627b5d"). InnerVolumeSpecName "kube-api-access-4ln82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.763781 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "da9627dd-e950-47ee-93a6-3d6f10627b5d" (UID: "da9627dd-e950-47ee-93a6-3d6f10627b5d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.799620 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-config-data" (OuterVolumeSpecName: "config-data") pod "da9627dd-e950-47ee-93a6-3d6f10627b5d" (UID: "da9627dd-e950-47ee-93a6-3d6f10627b5d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.801339 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da9627dd-e950-47ee-93a6-3d6f10627b5d" (UID: "da9627dd-e950-47ee-93a6-3d6f10627b5d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.862109 4940 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.862158 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.862177 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ln82\" (UniqueName: \"kubernetes.io/projected/da9627dd-e950-47ee-93a6-3d6f10627b5d-kube-api-access-4ln82\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:31 crc kubenswrapper[4940]: I1126 08:48:31.862200 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da9627dd-e950-47ee-93a6-3d6f10627b5d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.184500 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bdsxv" event={"ID":"da9627dd-e950-47ee-93a6-3d6f10627b5d","Type":"ContainerDied","Data":"165e60eb325c48b3dd380d3896afdb2e27f5ee7e879163fe9299f389ce1861b1"} Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.184539 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="165e60eb325c48b3dd380d3896afdb2e27f5ee7e879163fe9299f389ce1861b1" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.184591 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bdsxv" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.567326 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:32 crc kubenswrapper[4940]: E1126 08:48:32.568467 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da9627dd-e950-47ee-93a6-3d6f10627b5d" containerName="glance-db-sync" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.569789 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="da9627dd-e950-47ee-93a6-3d6f10627b5d" containerName="glance-db-sync" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.570062 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="da9627dd-e950-47ee-93a6-3d6f10627b5d" containerName="glance-db-sync" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.571200 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.584625 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.584665 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.585230 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6cn6v" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.586300 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.605013 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.675597 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.676129 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc9bg\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-kube-api-access-tc9bg\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.676285 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-scripts\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.676352 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-logs\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.676438 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.676533 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-ceph\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.676604 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-config-data\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.702690 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f5647cf7c-cw4p8"] Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.706889 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.718536 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f5647cf7c-cw4p8"] Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.778024 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc9bg\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-kube-api-access-tc9bg\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.778140 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-scripts\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.778156 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-logs\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.778193 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.778219 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-ceph\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.778235 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-config-data\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.778289 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.778772 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" 
(UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.788462 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-logs\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.804619 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tc9bg\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-kube-api-access-tc9bg\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.807068 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-ceph\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.807392 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.808499 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-scripts\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.809585 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-config-data\") pod \"glance-default-external-api-0\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.880247 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-dns-svc\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.880592 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.880777 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-nb\") 
pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.880919 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-config\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.881058 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzpf2\" (UniqueName: \"kubernetes.io/projected/1846409b-94ff-442a-8021-cec25d90e9cc-kube-api-access-rzpf2\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.905626 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.966102 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.967880 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.974863 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.979634 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.983359 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.983971 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-nb\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.984124 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-config\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.984237 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzpf2\" (UniqueName: \"kubernetes.io/projected/1846409b-94ff-442a-8021-cec25d90e9cc-kube-api-access-rzpf2\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.984369 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-dns-svc\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.985377 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-config\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.985557 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-nb\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.985868 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:32 crc kubenswrapper[4940]: I1126 08:48:32.986791 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-dns-svc\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.019254 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzpf2\" (UniqueName: \"kubernetes.io/projected/1846409b-94ff-442a-8021-cec25d90e9cc-kube-api-access-rzpf2\") pod \"dnsmasq-dns-6f5647cf7c-cw4p8\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.026884 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.089091 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.089179 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-ceph\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.089217 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.089273 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f57c4\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-kube-api-access-f57c4\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.089300 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.089331 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.089396 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-logs\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.191650 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f57c4\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-kube-api-access-f57c4\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.191704 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.191746 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.191829 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-logs\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.191865 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.191928 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-ceph\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.191959 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.192427 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.192639 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-logs\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.195819 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-ceph\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.197400 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 
08:48:33.201570 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.204777 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.212865 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f57c4\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-kube-api-access-f57c4\") pod \"glance-default-internal-api-0\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.310626 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:48:33 crc kubenswrapper[4940]: W1126 08:48:33.539755 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d5c7d13_ce42_4d22_a720_c985fe79b585.slice/crio-b480bc37571030b20105468f5054613ecea92c3ebb9770d2641a496397395db0 WatchSource:0}: Error finding container b480bc37571030b20105468f5054613ecea92c3ebb9770d2641a496397395db0: Status 404 returned error can't find the container with id b480bc37571030b20105468f5054613ecea92c3ebb9770d2641a496397395db0 Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.540455 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.570973 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f5647cf7c-cw4p8"] Nov 26 08:48:33 crc kubenswrapper[4940]: W1126 08:48:33.580376 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1846409b_94ff_442a_8021_cec25d90e9cc.slice/crio-9c95800eb58370d048343b9e4f05d6d0ba802ccad6a7ff9ef6569461eba88772 WatchSource:0}: Error finding container 9c95800eb58370d048343b9e4f05d6d0ba802ccad6a7ff9ef6569461eba88772: Status 404 returned error can't find the container with id 9c95800eb58370d048343b9e4f05d6d0ba802ccad6a7ff9ef6569461eba88772 Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.807456 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:33 crc kubenswrapper[4940]: I1126 08:48:33.862992 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:34 crc kubenswrapper[4940]: I1126 08:48:34.210464 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb621d7c-3594-45c8-b141-b3b4cecd3167","Type":"ContainerStarted","Data":"0482b7b16a4aa475118bb313cf8ac1807277f6d50b9842c66d2a41d1ad3bde75"} Nov 26 08:48:34 crc kubenswrapper[4940]: I1126 08:48:34.212014 4940 generic.go:334] "Generic (PLEG): container finished" podID="1846409b-94ff-442a-8021-cec25d90e9cc" 
containerID="54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594" exitCode=0 Nov 26 08:48:34 crc kubenswrapper[4940]: I1126 08:48:34.212915 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" event={"ID":"1846409b-94ff-442a-8021-cec25d90e9cc","Type":"ContainerDied","Data":"54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594"} Nov 26 08:48:34 crc kubenswrapper[4940]: I1126 08:48:34.212952 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" event={"ID":"1846409b-94ff-442a-8021-cec25d90e9cc","Type":"ContainerStarted","Data":"9c95800eb58370d048343b9e4f05d6d0ba802ccad6a7ff9ef6569461eba88772"} Nov 26 08:48:34 crc kubenswrapper[4940]: I1126 08:48:34.222714 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d5c7d13-ce42-4d22-a720-c985fe79b585","Type":"ContainerStarted","Data":"a42817c884d0ddd6bf93f456cc58add16eddbdef7698ddaf401989ed8ef00fe7"} Nov 26 08:48:34 crc kubenswrapper[4940]: I1126 08:48:34.222759 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d5c7d13-ce42-4d22-a720-c985fe79b585","Type":"ContainerStarted","Data":"b480bc37571030b20105468f5054613ecea92c3ebb9770d2641a496397395db0"} Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.233114 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d5c7d13-ce42-4d22-a720-c985fe79b585","Type":"ContainerStarted","Data":"adc75a3f5391ca2b9acacb2f6731d0c4dce3154523346fb959c2b50ada331a2a"} Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.233662 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerName="glance-log" containerID="cri-o://a42817c884d0ddd6bf93f456cc58add16eddbdef7698ddaf401989ed8ef00fe7" gracePeriod=30 Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.234407 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerName="glance-httpd" containerID="cri-o://adc75a3f5391ca2b9acacb2f6731d0c4dce3154523346fb959c2b50ada331a2a" gracePeriod=30 Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.238490 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb621d7c-3594-45c8-b141-b3b4cecd3167","Type":"ContainerStarted","Data":"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2"} Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.238532 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb621d7c-3594-45c8-b141-b3b4cecd3167","Type":"ContainerStarted","Data":"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98"} Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.249670 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" event={"ID":"1846409b-94ff-442a-8021-cec25d90e9cc","Type":"ContainerStarted","Data":"ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff"} Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.250133 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:35 crc kubenswrapper[4940]: 
I1126 08:48:35.264837 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.2648164680000002 podStartE2EDuration="3.264816468s" podCreationTimestamp="2025-11-26 08:48:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:48:35.256489563 +0000 UTC m=+6816.776631192" watchObservedRunningTime="2025-11-26 08:48:35.264816468 +0000 UTC m=+6816.784958087" Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.286518 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" podStartSLOduration=3.286497737 podStartE2EDuration="3.286497737s" podCreationTimestamp="2025-11-26 08:48:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:48:35.276606643 +0000 UTC m=+6816.796748262" watchObservedRunningTime="2025-11-26 08:48:35.286497737 +0000 UTC m=+6816.806639366" Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.300656 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.300635577 podStartE2EDuration="3.300635577s" podCreationTimestamp="2025-11-26 08:48:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:48:35.297687683 +0000 UTC m=+6816.817829312" watchObservedRunningTime="2025-11-26 08:48:35.300635577 +0000 UTC m=+6816.820777196" Nov 26 08:48:35 crc kubenswrapper[4940]: I1126 08:48:35.627660 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.262443 4940 generic.go:334] "Generic (PLEG): container finished" podID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerID="adc75a3f5391ca2b9acacb2f6731d0c4dce3154523346fb959c2b50ada331a2a" exitCode=0 Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.262751 4940 generic.go:334] "Generic (PLEG): container finished" podID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerID="a42817c884d0ddd6bf93f456cc58add16eddbdef7698ddaf401989ed8ef00fe7" exitCode=143 Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.263471 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d5c7d13-ce42-4d22-a720-c985fe79b585","Type":"ContainerDied","Data":"adc75a3f5391ca2b9acacb2f6731d0c4dce3154523346fb959c2b50ada331a2a"} Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.263499 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d5c7d13-ce42-4d22-a720-c985fe79b585","Type":"ContainerDied","Data":"a42817c884d0ddd6bf93f456cc58add16eddbdef7698ddaf401989ed8ef00fe7"} Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.263513 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d5c7d13-ce42-4d22-a720-c985fe79b585","Type":"ContainerDied","Data":"b480bc37571030b20105468f5054613ecea92c3ebb9770d2641a496397395db0"} Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.263524 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b480bc37571030b20105468f5054613ecea92c3ebb9770d2641a496397395db0" Nov 26 08:48:36 crc kubenswrapper[4940]: 
I1126 08:48:36.292886 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.458581 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-scripts\") pod \"9d5c7d13-ce42-4d22-a720-c985fe79b585\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.458682 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-ceph\") pod \"9d5c7d13-ce42-4d22-a720-c985fe79b585\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.458747 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-combined-ca-bundle\") pod \"9d5c7d13-ce42-4d22-a720-c985fe79b585\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.458817 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-config-data\") pod \"9d5c7d13-ce42-4d22-a720-c985fe79b585\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.458860 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-logs\") pod \"9d5c7d13-ce42-4d22-a720-c985fe79b585\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.458947 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-httpd-run\") pod \"9d5c7d13-ce42-4d22-a720-c985fe79b585\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.458987 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tc9bg\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-kube-api-access-tc9bg\") pod \"9d5c7d13-ce42-4d22-a720-c985fe79b585\" (UID: \"9d5c7d13-ce42-4d22-a720-c985fe79b585\") " Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.459359 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9d5c7d13-ce42-4d22-a720-c985fe79b585" (UID: "9d5c7d13-ce42-4d22-a720-c985fe79b585"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.459413 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-logs" (OuterVolumeSpecName: "logs") pod "9d5c7d13-ce42-4d22-a720-c985fe79b585" (UID: "9d5c7d13-ce42-4d22-a720-c985fe79b585"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.459758 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.459784 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d5c7d13-ce42-4d22-a720-c985fe79b585-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.465315 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-scripts" (OuterVolumeSpecName: "scripts") pod "9d5c7d13-ce42-4d22-a720-c985fe79b585" (UID: "9d5c7d13-ce42-4d22-a720-c985fe79b585"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.465326 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-kube-api-access-tc9bg" (OuterVolumeSpecName: "kube-api-access-tc9bg") pod "9d5c7d13-ce42-4d22-a720-c985fe79b585" (UID: "9d5c7d13-ce42-4d22-a720-c985fe79b585"). InnerVolumeSpecName "kube-api-access-tc9bg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.465458 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-ceph" (OuterVolumeSpecName: "ceph") pod "9d5c7d13-ce42-4d22-a720-c985fe79b585" (UID: "9d5c7d13-ce42-4d22-a720-c985fe79b585"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.482183 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d5c7d13-ce42-4d22-a720-c985fe79b585" (UID: "9d5c7d13-ce42-4d22-a720-c985fe79b585"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.499163 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-config-data" (OuterVolumeSpecName: "config-data") pod "9d5c7d13-ce42-4d22-a720-c985fe79b585" (UID: "9d5c7d13-ce42-4d22-a720-c985fe79b585"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.560814 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.560848 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.560857 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tc9bg\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-kube-api-access-tc9bg\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.560867 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d5c7d13-ce42-4d22-a720-c985fe79b585-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:36 crc kubenswrapper[4940]: I1126 08:48:36.560877 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9d5c7d13-ce42-4d22-a720-c985fe79b585-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.275726 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.275832 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerName="glance-log" containerID="cri-o://a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98" gracePeriod=30 Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.276216 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerName="glance-httpd" containerID="cri-o://420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2" gracePeriod=30 Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.316167 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.325158 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.350868 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:37 crc kubenswrapper[4940]: E1126 08:48:37.351298 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerName="glance-log" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.351316 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerName="glance-log" Nov 26 08:48:37 crc kubenswrapper[4940]: E1126 08:48:37.351346 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerName="glance-httpd" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.351352 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerName="glance-httpd" Nov 26 
08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.351508 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerName="glance-log" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.351538 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" containerName="glance-httpd" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.352488 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.356259 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.366460 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.477125 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.477252 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.477306 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn2kv\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-kube-api-access-xn2kv\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.477456 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-logs\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.477617 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-ceph\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.477769 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-scripts\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.477812 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-config-data\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.579081 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-logs\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.579191 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-ceph\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.579232 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-scripts\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.579259 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-config-data\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.579375 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.579403 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.579436 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn2kv\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-kube-api-access-xn2kv\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.579874 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-logs\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.580358 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-httpd-run\") pod \"glance-default-external-api-0\" (UID: 
\"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.584372 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-config-data\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.586725 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-scripts\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.589742 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-ceph\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.590332 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.596178 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn2kv\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-kube-api-access-xn2kv\") pod \"glance-default-external-api-0\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.680876 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.892197 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.985571 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-httpd-run\") pod \"fb621d7c-3594-45c8-b141-b3b4cecd3167\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.985693 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-config-data\") pod \"fb621d7c-3594-45c8-b141-b3b4cecd3167\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.985762 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-scripts\") pod \"fb621d7c-3594-45c8-b141-b3b4cecd3167\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.985808 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-combined-ca-bundle\") pod \"fb621d7c-3594-45c8-b141-b3b4cecd3167\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.985887 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f57c4\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-kube-api-access-f57c4\") pod \"fb621d7c-3594-45c8-b141-b3b4cecd3167\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.985943 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-logs\") pod \"fb621d7c-3594-45c8-b141-b3b4cecd3167\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.986014 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-ceph\") pod \"fb621d7c-3594-45c8-b141-b3b4cecd3167\" (UID: \"fb621d7c-3594-45c8-b141-b3b4cecd3167\") " Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.986420 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fb621d7c-3594-45c8-b141-b3b4cecd3167" (UID: "fb621d7c-3594-45c8-b141-b3b4cecd3167"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.986474 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-logs" (OuterVolumeSpecName: "logs") pod "fb621d7c-3594-45c8-b141-b3b4cecd3167" (UID: "fb621d7c-3594-45c8-b141-b3b4cecd3167"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.987094 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.987128 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fb621d7c-3594-45c8-b141-b3b4cecd3167-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.989798 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-kube-api-access-f57c4" (OuterVolumeSpecName: "kube-api-access-f57c4") pod "fb621d7c-3594-45c8-b141-b3b4cecd3167" (UID: "fb621d7c-3594-45c8-b141-b3b4cecd3167"). InnerVolumeSpecName "kube-api-access-f57c4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.990314 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-scripts" (OuterVolumeSpecName: "scripts") pod "fb621d7c-3594-45c8-b141-b3b4cecd3167" (UID: "fb621d7c-3594-45c8-b141-b3b4cecd3167"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:37 crc kubenswrapper[4940]: I1126 08:48:37.992757 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-ceph" (OuterVolumeSpecName: "ceph") pod "fb621d7c-3594-45c8-b141-b3b4cecd3167" (UID: "fb621d7c-3594-45c8-b141-b3b4cecd3167"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.015868 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb621d7c-3594-45c8-b141-b3b4cecd3167" (UID: "fb621d7c-3594-45c8-b141-b3b4cecd3167"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.040705 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-config-data" (OuterVolumeSpecName: "config-data") pod "fb621d7c-3594-45c8-b141-b3b4cecd3167" (UID: "fb621d7c-3594-45c8-b141-b3b4cecd3167"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.088257 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f57c4\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-kube-api-access-f57c4\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.088534 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/fb621d7c-3594-45c8-b141-b3b4cecd3167-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.088598 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.088653 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.088713 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb621d7c-3594-45c8-b141-b3b4cecd3167-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.268764 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.284126 4940 generic.go:334] "Generic (PLEG): container finished" podID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerID="420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2" exitCode=0 Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.284155 4940 generic.go:334] "Generic (PLEG): container finished" podID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerID="a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98" exitCode=143 Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.284184 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.284230 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb621d7c-3594-45c8-b141-b3b4cecd3167","Type":"ContainerDied","Data":"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2"} Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.284278 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb621d7c-3594-45c8-b141-b3b4cecd3167","Type":"ContainerDied","Data":"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98"} Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.284299 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fb621d7c-3594-45c8-b141-b3b4cecd3167","Type":"ContainerDied","Data":"0482b7b16a4aa475118bb313cf8ac1807277f6d50b9842c66d2a41d1ad3bde75"} Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.284317 4940 scope.go:117] "RemoveContainer" containerID="420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.285318 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db","Type":"ContainerStarted","Data":"10706840b6c68785e82ab326bfff9027a0e0e8d1eb53d24927d18f613a4d4722"} Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.309294 4940 scope.go:117] "RemoveContainer" containerID="a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.319493 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.338388 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.357269 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:38 crc kubenswrapper[4940]: E1126 08:48:38.357932 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerName="glance-httpd" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.357956 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerName="glance-httpd" Nov 26 08:48:38 crc kubenswrapper[4940]: E1126 08:48:38.358006 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerName="glance-log" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.358018 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerName="glance-log" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.358372 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerName="glance-log" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.358415 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" containerName="glance-httpd" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.359704 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.361846 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.369010 4940 scope.go:117] "RemoveContainer" containerID="420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2" Nov 26 08:48:38 crc kubenswrapper[4940]: E1126 08:48:38.370909 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2\": container with ID starting with 420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2 not found: ID does not exist" containerID="420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.370951 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2"} err="failed to get container status \"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2\": rpc error: code = NotFound desc = could not find container \"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2\": container with ID starting with 420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2 not found: ID does not exist" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.370975 4940 scope.go:117] "RemoveContainer" containerID="a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98" Nov 26 08:48:38 crc kubenswrapper[4940]: E1126 08:48:38.371384 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98\": container with ID starting with a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98 not found: ID does not exist" containerID="a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.371409 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98"} err="failed to get container status \"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98\": rpc error: code = NotFound desc = could not find container \"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98\": container with ID starting with a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98 not found: ID does not exist" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.371423 4940 scope.go:117] "RemoveContainer" containerID="420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.373460 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2"} err="failed to get container status \"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2\": rpc error: code = NotFound desc = could not find container \"420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2\": container with ID starting with 420a6718da1df8364ec8fdc765b83455355dcab00397e25f5ec4b50f3fe00df2 not found: ID does not exist" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 
08:48:38.373490 4940 scope.go:117] "RemoveContainer" containerID="a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.373734 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98"} err="failed to get container status \"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98\": rpc error: code = NotFound desc = could not find container \"a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98\": container with ID starting with a58b1063e2bbc2572cd49a89533de40084f2b6bd1b56bbfce4e1db2c21642c98 not found: ID does not exist" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.379886 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.494996 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.495061 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.495311 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.495417 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d6f4\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-kube-api-access-5d6f4\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.495529 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.495632 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.495684 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-logs\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.597112 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.597178 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.597208 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.597264 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d6f4\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-kube-api-access-5d6f4\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.597340 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.597420 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.597469 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-logs\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.597948 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.598032 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-logs\") pod \"glance-default-internal-api-0\" (UID: 
\"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.602948 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.603702 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-ceph\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.606242 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.606258 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.611999 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d6f4\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-kube-api-access-5d6f4\") pod \"glance-default-internal-api-0\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:48:38 crc kubenswrapper[4940]: I1126 08:48:38.683385 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:48:39 crc kubenswrapper[4940]: I1126 08:48:39.189598 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d5c7d13-ce42-4d22-a720-c985fe79b585" path="/var/lib/kubelet/pods/9d5c7d13-ce42-4d22-a720-c985fe79b585/volumes" Nov 26 08:48:39 crc kubenswrapper[4940]: I1126 08:48:39.190861 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb621d7c-3594-45c8-b141-b3b4cecd3167" path="/var/lib/kubelet/pods/fb621d7c-3594-45c8-b141-b3b4cecd3167/volumes" Nov 26 08:48:39 crc kubenswrapper[4940]: I1126 08:48:39.191503 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:48:39 crc kubenswrapper[4940]: I1126 08:48:39.297180 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db","Type":"ContainerStarted","Data":"4d0dac25a3a7420857bb6f1c73f51ac9104242139c009d040c9920f8a02ead4e"} Nov 26 08:48:39 crc kubenswrapper[4940]: I1126 08:48:39.299086 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d0092e5e-b891-449c-a390-ad9741a14b8f","Type":"ContainerStarted","Data":"7987b7bedffebc61ea05308e149acadbdcc2e537d1f0eb5b97f6f9dbfcf74ac7"} Nov 26 08:48:40 crc kubenswrapper[4940]: I1126 08:48:40.310455 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d0092e5e-b891-449c-a390-ad9741a14b8f","Type":"ContainerStarted","Data":"a64b289b03cd3b7766145b3a34cb3b7a8dbb600e3ea64d6ad45822a6ac77cced"} Nov 26 08:48:40 crc kubenswrapper[4940]: I1126 08:48:40.310762 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d0092e5e-b891-449c-a390-ad9741a14b8f","Type":"ContainerStarted","Data":"d881374d257f417e1c4f533832cbdd4450b2c7c56acd2a6f0774f3372b5980cd"} Nov 26 08:48:40 crc kubenswrapper[4940]: I1126 08:48:40.313271 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db","Type":"ContainerStarted","Data":"db8e92b7ef4d74dc42bc30a9e5d43e2e555e54e1c654e038cd2a7bcd1a001db5"} Nov 26 08:48:40 crc kubenswrapper[4940]: I1126 08:48:40.332145 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=2.332118537 podStartE2EDuration="2.332118537s" podCreationTimestamp="2025-11-26 08:48:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:48:40.331258089 +0000 UTC m=+6821.851399718" watchObservedRunningTime="2025-11-26 08:48:40.332118537 +0000 UTC m=+6821.852260176" Nov 26 08:48:40 crc kubenswrapper[4940]: I1126 08:48:40.356150 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.35613211 podStartE2EDuration="3.35613211s" podCreationTimestamp="2025-11-26 08:48:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:48:40.351660768 +0000 UTC m=+6821.871802387" watchObservedRunningTime="2025-11-26 08:48:40.35613211 +0000 UTC m=+6821.876273729" Nov 26 08:48:41 crc kubenswrapper[4940]: I1126 08:48:41.167001 4940 scope.go:117] "RemoveContainer" 
containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:48:41 crc kubenswrapper[4940]: E1126 08:48:41.167855 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:48:43 crc kubenswrapper[4940]: I1126 08:48:43.028235 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:48:43 crc kubenswrapper[4940]: I1126 08:48:43.089926 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6564d966f9-rdpnp"] Nov 26 08:48:43 crc kubenswrapper[4940]: I1126 08:48:43.090206 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp" podUID="b849da81-4187-4d5e-9077-d6f3affd46f0" containerName="dnsmasq-dns" containerID="cri-o://c8992018d04e40e16e5d12c0c843c39118f802f8850f42cb6b9f571e550497f4" gracePeriod=10 Nov 26 08:48:43 crc kubenswrapper[4940]: I1126 08:48:43.346697 4940 generic.go:334] "Generic (PLEG): container finished" podID="b849da81-4187-4d5e-9077-d6f3affd46f0" containerID="c8992018d04e40e16e5d12c0c843c39118f802f8850f42cb6b9f571e550497f4" exitCode=0 Nov 26 08:48:43 crc kubenswrapper[4940]: I1126 08:48:43.346852 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp" event={"ID":"b849da81-4187-4d5e-9077-d6f3affd46f0","Type":"ContainerDied","Data":"c8992018d04e40e16e5d12c0c843c39118f802f8850f42cb6b9f571e550497f4"} Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.363853 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp" event={"ID":"b849da81-4187-4d5e-9077-d6f3affd46f0","Type":"ContainerDied","Data":"ac85a7325bf1fc7030dba8c63d01d268402e8058b351764cbad683c67533ab6d"} Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.364200 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac85a7325bf1fc7030dba8c63d01d268402e8058b351764cbad683c67533ab6d" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.381246 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6564d966f9-rdpnp" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.498635 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-config\") pod \"b849da81-4187-4d5e-9077-d6f3affd46f0\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.498704 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-sb\") pod \"b849da81-4187-4d5e-9077-d6f3affd46f0\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.498751 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpzvr\" (UniqueName: \"kubernetes.io/projected/b849da81-4187-4d5e-9077-d6f3affd46f0-kube-api-access-mpzvr\") pod \"b849da81-4187-4d5e-9077-d6f3affd46f0\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.498854 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-nb\") pod \"b849da81-4187-4d5e-9077-d6f3affd46f0\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.498956 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-dns-svc\") pod \"b849da81-4187-4d5e-9077-d6f3affd46f0\" (UID: \"b849da81-4187-4d5e-9077-d6f3affd46f0\") " Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.509821 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b849da81-4187-4d5e-9077-d6f3affd46f0-kube-api-access-mpzvr" (OuterVolumeSpecName: "kube-api-access-mpzvr") pod "b849da81-4187-4d5e-9077-d6f3affd46f0" (UID: "b849da81-4187-4d5e-9077-d6f3affd46f0"). InnerVolumeSpecName "kube-api-access-mpzvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.541166 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b849da81-4187-4d5e-9077-d6f3affd46f0" (UID: "b849da81-4187-4d5e-9077-d6f3affd46f0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.545178 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-config" (OuterVolumeSpecName: "config") pod "b849da81-4187-4d5e-9077-d6f3affd46f0" (UID: "b849da81-4187-4d5e-9077-d6f3affd46f0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.552980 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b849da81-4187-4d5e-9077-d6f3affd46f0" (UID: "b849da81-4187-4d5e-9077-d6f3affd46f0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.560764 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b849da81-4187-4d5e-9077-d6f3affd46f0" (UID: "b849da81-4187-4d5e-9077-d6f3affd46f0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.600830 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.600877 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.600895 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpzvr\" (UniqueName: \"kubernetes.io/projected/b849da81-4187-4d5e-9077-d6f3affd46f0-kube-api-access-mpzvr\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.600909 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:44 crc kubenswrapper[4940]: I1126 08:48:44.600920 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b849da81-4187-4d5e-9077-d6f3affd46f0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:48:45 crc kubenswrapper[4940]: I1126 08:48:45.378120 4940 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 08:48:45 crc kubenswrapper[4940]: I1126 08:48:45.417531 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6564d966f9-rdpnp"]
Nov 26 08:48:45 crc kubenswrapper[4940]: I1126 08:48:45.430956 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6564d966f9-rdpnp"]
Nov 26 08:48:47 crc kubenswrapper[4940]: I1126 08:48:47.185982 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b849da81-4187-4d5e-9077-d6f3affd46f0" path="/var/lib/kubelet/pods/b849da81-4187-4d5e-9077-d6f3affd46f0/volumes"
Nov 26 08:48:47 crc kubenswrapper[4940]: I1126 08:48:47.682763 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 26 08:48:47 crc kubenswrapper[4940]: I1126 08:48:47.683258 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 26 08:48:47 crc kubenswrapper[4940]: I1126 08:48:47.734961 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 26 08:48:47 crc kubenswrapper[4940]: I1126 08:48:47.759322 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 26 08:48:48 crc kubenswrapper[4940]: I1126 08:48:48.421529 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 26 08:48:48 crc kubenswrapper[4940]: I1126 08:48:48.421794 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 26 08:48:48 crc kubenswrapper[4940]: I1126 08:48:48.683910 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 26 08:48:48 crc kubenswrapper[4940]: I1126 08:48:48.683989 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 26 08:48:48 crc kubenswrapper[4940]: I1126 08:48:48.716593 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 26 08:48:48 crc kubenswrapper[4940]: I1126 08:48:48.725729 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 26 08:48:49 crc kubenswrapper[4940]: I1126 08:48:49.433289 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 26 08:48:49 crc kubenswrapper[4940]: I1126 08:48:49.433769 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 26 08:48:50 crc kubenswrapper[4940]: I1126 08:48:50.344409 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 08:48:50 crc kubenswrapper[4940]: I1126 08:48:50.351959 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 08:48:51 crc kubenswrapper[4940]: I1126 08:48:51.401746 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 08:48:51 crc kubenswrapper[4940]: I1126 08:48:51.408565 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
pod="openstack/glance-default-internal-api-0" Nov 26 08:48:54 crc kubenswrapper[4940]: I1126 08:48:54.165546 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:48:54 crc kubenswrapper[4940]: E1126 08:48:54.166409 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:49:00 crc kubenswrapper[4940]: I1126 08:49:00.937124 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-ktrvj"] Nov 26 08:49:00 crc kubenswrapper[4940]: E1126 08:49:00.937871 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b849da81-4187-4d5e-9077-d6f3affd46f0" containerName="init" Nov 26 08:49:00 crc kubenswrapper[4940]: I1126 08:49:00.937884 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b849da81-4187-4d5e-9077-d6f3affd46f0" containerName="init" Nov 26 08:49:00 crc kubenswrapper[4940]: E1126 08:49:00.937917 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b849da81-4187-4d5e-9077-d6f3affd46f0" containerName="dnsmasq-dns" Nov 26 08:49:00 crc kubenswrapper[4940]: I1126 08:49:00.937923 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b849da81-4187-4d5e-9077-d6f3affd46f0" containerName="dnsmasq-dns" Nov 26 08:49:00 crc kubenswrapper[4940]: I1126 08:49:00.938123 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b849da81-4187-4d5e-9077-d6f3affd46f0" containerName="dnsmasq-dns" Nov 26 08:49:00 crc kubenswrapper[4940]: I1126 08:49:00.938855 4940 util.go:30] "No sandbox for pod can be found. 
Nov 26 08:49:00 crc kubenswrapper[4940]: I1126 08:49:00.952725 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ktrvj"]
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.012560 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89hs5\" (UniqueName: \"kubernetes.io/projected/37c5efdc-60d5-4bf3-b84f-84459d2ba431-kube-api-access-89hs5\") pod \"placement-db-create-ktrvj\" (UID: \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\") " pod="openstack/placement-db-create-ktrvj"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.012656 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37c5efdc-60d5-4bf3-b84f-84459d2ba431-operator-scripts\") pod \"placement-db-create-ktrvj\" (UID: \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\") " pod="openstack/placement-db-create-ktrvj"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.114220 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89hs5\" (UniqueName: \"kubernetes.io/projected/37c5efdc-60d5-4bf3-b84f-84459d2ba431-kube-api-access-89hs5\") pod \"placement-db-create-ktrvj\" (UID: \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\") " pod="openstack/placement-db-create-ktrvj"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.114308 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37c5efdc-60d5-4bf3-b84f-84459d2ba431-operator-scripts\") pod \"placement-db-create-ktrvj\" (UID: \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\") " pod="openstack/placement-db-create-ktrvj"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.115276 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37c5efdc-60d5-4bf3-b84f-84459d2ba431-operator-scripts\") pod \"placement-db-create-ktrvj\" (UID: \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\") " pod="openstack/placement-db-create-ktrvj"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.137873 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89hs5\" (UniqueName: \"kubernetes.io/projected/37c5efdc-60d5-4bf3-b84f-84459d2ba431-kube-api-access-89hs5\") pod \"placement-db-create-ktrvj\" (UID: \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\") " pod="openstack/placement-db-create-ktrvj"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.142404 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-984f-account-create-update-ht6qm"]
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.143535 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.145715 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.150177 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-984f-account-create-update-ht6qm"]
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.216210 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trnqv\" (UniqueName: \"kubernetes.io/projected/623678ef-a5c7-48a1-ac34-f65b60fd7d54-kube-api-access-trnqv\") pod \"placement-984f-account-create-update-ht6qm\" (UID: \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\") " pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.216508 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/623678ef-a5c7-48a1-ac34-f65b60fd7d54-operator-scripts\") pod \"placement-984f-account-create-update-ht6qm\" (UID: \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\") " pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.262672 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ktrvj"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.318691 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trnqv\" (UniqueName: \"kubernetes.io/projected/623678ef-a5c7-48a1-ac34-f65b60fd7d54-kube-api-access-trnqv\") pod \"placement-984f-account-create-update-ht6qm\" (UID: \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\") " pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.319200 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/623678ef-a5c7-48a1-ac34-f65b60fd7d54-operator-scripts\") pod \"placement-984f-account-create-update-ht6qm\" (UID: \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\") " pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.319857 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/623678ef-a5c7-48a1-ac34-f65b60fd7d54-operator-scripts\") pod \"placement-984f-account-create-update-ht6qm\" (UID: \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\") " pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.338003 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trnqv\" (UniqueName: \"kubernetes.io/projected/623678ef-a5c7-48a1-ac34-f65b60fd7d54-kube-api-access-trnqv\") pod \"placement-984f-account-create-update-ht6qm\" (UID: \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\") " pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.491810 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.732864 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ktrvj"]
Nov 26 08:49:01 crc kubenswrapper[4940]: I1126 08:49:01.929874 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-984f-account-create-update-ht6qm"]
Nov 26 08:49:01 crc kubenswrapper[4940]: W1126 08:49:01.934267 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod623678ef_a5c7_48a1_ac34_f65b60fd7d54.slice/crio-22839f2c83ef68ae11ed443b0cd17915026824cb0495edfc509f7837b9333f5e WatchSource:0}: Error finding container 22839f2c83ef68ae11ed443b0cd17915026824cb0495edfc509f7837b9333f5e: Status 404 returned error can't find the container with id 22839f2c83ef68ae11ed443b0cd17915026824cb0495edfc509f7837b9333f5e
Nov 26 08:49:02 crc kubenswrapper[4940]: I1126 08:49:02.584727 4940 generic.go:334] "Generic (PLEG): container finished" podID="37c5efdc-60d5-4bf3-b84f-84459d2ba431" containerID="44d9771090f0e1948df63e142bdf4b6ac9cbabfa67185f72108fbb55a8c7037d" exitCode=0
Nov 26 08:49:02 crc kubenswrapper[4940]: I1126 08:49:02.584818 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ktrvj" event={"ID":"37c5efdc-60d5-4bf3-b84f-84459d2ba431","Type":"ContainerDied","Data":"44d9771090f0e1948df63e142bdf4b6ac9cbabfa67185f72108fbb55a8c7037d"}
Nov 26 08:49:02 crc kubenswrapper[4940]: I1126 08:49:02.585326 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ktrvj" event={"ID":"37c5efdc-60d5-4bf3-b84f-84459d2ba431","Type":"ContainerStarted","Data":"7f9d024ae47ec804787e21039459df2abce57e4b3c89b18dce632e5ed165dda6"}
Nov 26 08:49:02 crc kubenswrapper[4940]: I1126 08:49:02.588314 4940 generic.go:334] "Generic (PLEG): container finished" podID="623678ef-a5c7-48a1-ac34-f65b60fd7d54" containerID="cfa131c65c040ecbbf657175a4367c918a0f73bb202f333239949aee6bde1f75" exitCode=0
Nov 26 08:49:02 crc kubenswrapper[4940]: I1126 08:49:02.588397 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-984f-account-create-update-ht6qm" event={"ID":"623678ef-a5c7-48a1-ac34-f65b60fd7d54","Type":"ContainerDied","Data":"cfa131c65c040ecbbf657175a4367c918a0f73bb202f333239949aee6bde1f75"}
Nov 26 08:49:02 crc kubenswrapper[4940]: I1126 08:49:02.588447 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-984f-account-create-update-ht6qm" event={"ID":"623678ef-a5c7-48a1-ac34-f65b60fd7d54","Type":"ContainerStarted","Data":"22839f2c83ef68ae11ed443b0cd17915026824cb0495edfc509f7837b9333f5e"}
Nov 26 08:49:03 crc kubenswrapper[4940]: I1126 08:49:03.998207 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ktrvj"
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.078968 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89hs5\" (UniqueName: \"kubernetes.io/projected/37c5efdc-60d5-4bf3-b84f-84459d2ba431-kube-api-access-89hs5\") pod \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\" (UID: \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\") "
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.079146 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37c5efdc-60d5-4bf3-b84f-84459d2ba431-operator-scripts\") pod \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\" (UID: \"37c5efdc-60d5-4bf3-b84f-84459d2ba431\") "
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.080203 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37c5efdc-60d5-4bf3-b84f-84459d2ba431-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37c5efdc-60d5-4bf3-b84f-84459d2ba431" (UID: "37c5efdc-60d5-4bf3-b84f-84459d2ba431"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.080406 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37c5efdc-60d5-4bf3-b84f-84459d2ba431-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.084734 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37c5efdc-60d5-4bf3-b84f-84459d2ba431-kube-api-access-89hs5" (OuterVolumeSpecName: "kube-api-access-89hs5") pod "37c5efdc-60d5-4bf3-b84f-84459d2ba431" (UID: "37c5efdc-60d5-4bf3-b84f-84459d2ba431"). InnerVolumeSpecName "kube-api-access-89hs5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.124723 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-984f-account-create-update-ht6qm"
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.181764 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/623678ef-a5c7-48a1-ac34-f65b60fd7d54-operator-scripts\") pod \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\" (UID: \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\") "
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.181926 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trnqv\" (UniqueName: \"kubernetes.io/projected/623678ef-a5c7-48a1-ac34-f65b60fd7d54-kube-api-access-trnqv\") pod \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\" (UID: \"623678ef-a5c7-48a1-ac34-f65b60fd7d54\") "
Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.182234 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/623678ef-a5c7-48a1-ac34-f65b60fd7d54-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "623678ef-a5c7-48a1-ac34-f65b60fd7d54" (UID: "623678ef-a5c7-48a1-ac34-f65b60fd7d54"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.182817 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/623678ef-a5c7-48a1-ac34-f65b60fd7d54-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.182857 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89hs5\" (UniqueName: \"kubernetes.io/projected/37c5efdc-60d5-4bf3-b84f-84459d2ba431-kube-api-access-89hs5\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.185088 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/623678ef-a5c7-48a1-ac34-f65b60fd7d54-kube-api-access-trnqv" (OuterVolumeSpecName: "kube-api-access-trnqv") pod "623678ef-a5c7-48a1-ac34-f65b60fd7d54" (UID: "623678ef-a5c7-48a1-ac34-f65b60fd7d54"). InnerVolumeSpecName "kube-api-access-trnqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.286522 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trnqv\" (UniqueName: \"kubernetes.io/projected/623678ef-a5c7-48a1-ac34-f65b60fd7d54-kube-api-access-trnqv\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.617451 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ktrvj" Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.617559 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ktrvj" event={"ID":"37c5efdc-60d5-4bf3-b84f-84459d2ba431","Type":"ContainerDied","Data":"7f9d024ae47ec804787e21039459df2abce57e4b3c89b18dce632e5ed165dda6"} Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.617636 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f9d024ae47ec804787e21039459df2abce57e4b3c89b18dce632e5ed165dda6" Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.620884 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-984f-account-create-update-ht6qm" event={"ID":"623678ef-a5c7-48a1-ac34-f65b60fd7d54","Type":"ContainerDied","Data":"22839f2c83ef68ae11ed443b0cd17915026824cb0495edfc509f7837b9333f5e"} Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.620945 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22839f2c83ef68ae11ed443b0cd17915026824cb0495edfc509f7837b9333f5e" Nov 26 08:49:04 crc kubenswrapper[4940]: I1126 08:49:04.621000 4940 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.166281 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"
Nov 26 08:49:06 crc kubenswrapper[4940]: E1126 08:49:06.167627 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.509273 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c7d584995-74jn2"]
Nov 26 08:49:06 crc kubenswrapper[4940]: E1126 08:49:06.513417 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="623678ef-a5c7-48a1-ac34-f65b60fd7d54" containerName="mariadb-account-create-update"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.513474 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="623678ef-a5c7-48a1-ac34-f65b60fd7d54" containerName="mariadb-account-create-update"
Nov 26 08:49:06 crc kubenswrapper[4940]: E1126 08:49:06.513571 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37c5efdc-60d5-4bf3-b84f-84459d2ba431" containerName="mariadb-database-create"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.513588 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="37c5efdc-60d5-4bf3-b84f-84459d2ba431" containerName="mariadb-database-create"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.514498 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="37c5efdc-60d5-4bf3-b84f-84459d2ba431" containerName="mariadb-database-create"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.514556 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="623678ef-a5c7-48a1-ac34-f65b60fd7d54" containerName="mariadb-account-create-update"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.519914 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c7d584995-74jn2"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.555872 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-4rr8h"]
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.559956 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4rr8h"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.574122 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.574275 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.574301 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hf7c5"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.585105 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c7d584995-74jn2"]
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.599836 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4rr8h"]
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632240 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58rtk\" (UniqueName: \"kubernetes.io/projected/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-kube-api-access-58rtk\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632336 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-dns-svc\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632463 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1111ad8a-5b85-45ea-9bf7-693eef0556db-logs\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632496 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-config-data\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632525 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-sb\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632549 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-scripts\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h"
Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632575 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-combined-ca-bundle\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h"
\"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632677 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl7wj\" (UniqueName: \"kubernetes.io/projected/1111ad8a-5b85-45ea-9bf7-693eef0556db-kube-api-access-wl7wj\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632720 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-nb\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.632744 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-config\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.734860 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1111ad8a-5b85-45ea-9bf7-693eef0556db-logs\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.734936 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-config-data\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.734967 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-sb\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.734995 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-scripts\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.735021 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-combined-ca-bundle\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.735128 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl7wj\" (UniqueName: \"kubernetes.io/projected/1111ad8a-5b85-45ea-9bf7-693eef0556db-kube-api-access-wl7wj\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " 
pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.735170 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-nb\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.735190 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-config\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.735223 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58rtk\" (UniqueName: \"kubernetes.io/projected/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-kube-api-access-58rtk\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.735243 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-dns-svc\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.735459 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1111ad8a-5b85-45ea-9bf7-693eef0556db-logs\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.736138 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-config\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.736161 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-nb\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.736177 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-dns-svc\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.736186 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-sb\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.740031 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-combined-ca-bundle\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.741105 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-scripts\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.741817 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-config-data\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.751359 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl7wj\" (UniqueName: \"kubernetes.io/projected/1111ad8a-5b85-45ea-9bf7-693eef0556db-kube-api-access-wl7wj\") pod \"placement-db-sync-4rr8h\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") " pod="openstack/placement-db-sync-4rr8h" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.753858 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58rtk\" (UniqueName: \"kubernetes.io/projected/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-kube-api-access-58rtk\") pod \"dnsmasq-dns-c7d584995-74jn2\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.847669 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:49:06 crc kubenswrapper[4940]: I1126 08:49:06.892016 4940 util.go:30] "No sandbox for pod can be found. 
Nov 26 08:49:07 crc kubenswrapper[4940]: I1126 08:49:07.334968 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c7d584995-74jn2"]
Nov 26 08:49:07 crc kubenswrapper[4940]: W1126 08:49:07.339744 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92f6a461_9fe4_4a6d_88ef_bd8bdb0906a9.slice/crio-5d877b1b4f0c44b0e7594057178f2a6659bd502ef12c07421adc09fae247476e WatchSource:0}: Error finding container 5d877b1b4f0c44b0e7594057178f2a6659bd502ef12c07421adc09fae247476e: Status 404 returned error can't find the container with id 5d877b1b4f0c44b0e7594057178f2a6659bd502ef12c07421adc09fae247476e
Nov 26 08:49:07 crc kubenswrapper[4940]: W1126 08:49:07.409074 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1111ad8a_5b85_45ea_9bf7_693eef0556db.slice/crio-21e3c8fbc7e6bfda9b4b605241e4956e6a956e77f8785b627f52a8dc1ea45066 WatchSource:0}: Error finding container 21e3c8fbc7e6bfda9b4b605241e4956e6a956e77f8785b627f52a8dc1ea45066: Status 404 returned error can't find the container with id 21e3c8fbc7e6bfda9b4b605241e4956e6a956e77f8785b627f52a8dc1ea45066
Nov 26 08:49:07 crc kubenswrapper[4940]: I1126 08:49:07.411229 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4rr8h"]
Nov 26 08:49:07 crc kubenswrapper[4940]: I1126 08:49:07.653415 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4rr8h" event={"ID":"1111ad8a-5b85-45ea-9bf7-693eef0556db","Type":"ContainerStarted","Data":"21e3c8fbc7e6bfda9b4b605241e4956e6a956e77f8785b627f52a8dc1ea45066"}
Nov 26 08:49:07 crc kubenswrapper[4940]: I1126 08:49:07.654944 4940 generic.go:334] "Generic (PLEG): container finished" podID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" containerID="d15c4411a0b64f84f5725ec5905c2cc8e3c40b0e77a602a73809f326899b694e" exitCode=0
Nov 26 08:49:07 crc kubenswrapper[4940]: I1126 08:49:07.654991 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c7d584995-74jn2" event={"ID":"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9","Type":"ContainerDied","Data":"d15c4411a0b64f84f5725ec5905c2cc8e3c40b0e77a602a73809f326899b694e"}
Nov 26 08:49:07 crc kubenswrapper[4940]: I1126 08:49:07.655023 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c7d584995-74jn2" event={"ID":"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9","Type":"ContainerStarted","Data":"5d877b1b4f0c44b0e7594057178f2a6659bd502ef12c07421adc09fae247476e"}
Nov 26 08:49:08 crc kubenswrapper[4940]: I1126 08:49:08.673512 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c7d584995-74jn2" event={"ID":"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9","Type":"ContainerStarted","Data":"f21e559e0e603497ccd920b25b5ed2429c235847f3783eaa60fcea7c8367b3cf"}
Nov 26 08:49:08 crc kubenswrapper[4940]: I1126 08:49:08.673874 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-c7d584995-74jn2"
Nov 26 08:49:08 crc kubenswrapper[4940]: I1126 08:49:08.696730 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-c7d584995-74jn2" podStartSLOduration=2.6967058550000003 podStartE2EDuration="2.696705855s" podCreationTimestamp="2025-11-26 08:49:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:49:08.690374373 +0000 UTC m=+6850.210516022" watchObservedRunningTime="2025-11-26 08:49:08.696705855 +0000 UTC m=+6850.216847504"
+0000 UTC" observedRunningTime="2025-11-26 08:49:08.690374373 +0000 UTC m=+6850.210516022" watchObservedRunningTime="2025-11-26 08:49:08.696705855 +0000 UTC m=+6850.216847504" Nov 26 08:49:11 crc kubenswrapper[4940]: I1126 08:49:11.708426 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4rr8h" event={"ID":"1111ad8a-5b85-45ea-9bf7-693eef0556db","Type":"ContainerStarted","Data":"7c207c5d54ed00d7d6c6785ae92205d2f268e76c064786716c261605101d8e1e"} Nov 26 08:49:11 crc kubenswrapper[4940]: I1126 08:49:11.726719 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-4rr8h" podStartSLOduration=2.583400933 podStartE2EDuration="5.72669955s" podCreationTimestamp="2025-11-26 08:49:06 +0000 UTC" firstStartedPulling="2025-11-26 08:49:07.411675649 +0000 UTC m=+6848.931817278" lastFinishedPulling="2025-11-26 08:49:10.554974266 +0000 UTC m=+6852.075115895" observedRunningTime="2025-11-26 08:49:11.723879861 +0000 UTC m=+6853.244021490" watchObservedRunningTime="2025-11-26 08:49:11.72669955 +0000 UTC m=+6853.246841169" Nov 26 08:49:11 crc kubenswrapper[4940]: I1126 08:49:11.734455 4940 scope.go:117] "RemoveContainer" containerID="743ca3bc7c8f8be1b5d0ef7a3636b46db6d36da48ee60a596ad44b094fdb7714" Nov 26 08:49:12 crc kubenswrapper[4940]: I1126 08:49:12.727548 4940 generic.go:334] "Generic (PLEG): container finished" podID="1111ad8a-5b85-45ea-9bf7-693eef0556db" containerID="7c207c5d54ed00d7d6c6785ae92205d2f268e76c064786716c261605101d8e1e" exitCode=0 Nov 26 08:49:12 crc kubenswrapper[4940]: I1126 08:49:12.727666 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4rr8h" event={"ID":"1111ad8a-5b85-45ea-9bf7-693eef0556db","Type":"ContainerDied","Data":"7c207c5d54ed00d7d6c6785ae92205d2f268e76c064786716c261605101d8e1e"} Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.108864 4940 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.174971 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wl7wj\" (UniqueName: \"kubernetes.io/projected/1111ad8a-5b85-45ea-9bf7-693eef0556db-kube-api-access-wl7wj\") pod \"1111ad8a-5b85-45ea-9bf7-693eef0556db\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") "
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.175136 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-config-data\") pod \"1111ad8a-5b85-45ea-9bf7-693eef0556db\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") "
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.175278 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-combined-ca-bundle\") pod \"1111ad8a-5b85-45ea-9bf7-693eef0556db\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") "
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.175360 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1111ad8a-5b85-45ea-9bf7-693eef0556db-logs\") pod \"1111ad8a-5b85-45ea-9bf7-693eef0556db\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") "
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.175977 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1111ad8a-5b85-45ea-9bf7-693eef0556db-logs" (OuterVolumeSpecName: "logs") pod "1111ad8a-5b85-45ea-9bf7-693eef0556db" (UID: "1111ad8a-5b85-45ea-9bf7-693eef0556db"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.176096 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-scripts\") pod \"1111ad8a-5b85-45ea-9bf7-693eef0556db\" (UID: \"1111ad8a-5b85-45ea-9bf7-693eef0556db\") "
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.177163 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1111ad8a-5b85-45ea-9bf7-693eef0556db-logs\") on node \"crc\" DevicePath \"\""
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.183969 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-scripts" (OuterVolumeSpecName: "scripts") pod "1111ad8a-5b85-45ea-9bf7-693eef0556db" (UID: "1111ad8a-5b85-45ea-9bf7-693eef0556db"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.187868 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1111ad8a-5b85-45ea-9bf7-693eef0556db-kube-api-access-wl7wj" (OuterVolumeSpecName: "kube-api-access-wl7wj") pod "1111ad8a-5b85-45ea-9bf7-693eef0556db" (UID: "1111ad8a-5b85-45ea-9bf7-693eef0556db"). InnerVolumeSpecName "kube-api-access-wl7wj". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.201309 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-config-data" (OuterVolumeSpecName: "config-data") pod "1111ad8a-5b85-45ea-9bf7-693eef0556db" (UID: "1111ad8a-5b85-45ea-9bf7-693eef0556db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.206424 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1111ad8a-5b85-45ea-9bf7-693eef0556db" (UID: "1111ad8a-5b85-45ea-9bf7-693eef0556db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.279219 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.279250 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.279260 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1111ad8a-5b85-45ea-9bf7-693eef0556db-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.279270 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wl7wj\" (UniqueName: \"kubernetes.io/projected/1111ad8a-5b85-45ea-9bf7-693eef0556db-kube-api-access-wl7wj\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.762997 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4rr8h" event={"ID":"1111ad8a-5b85-45ea-9bf7-693eef0556db","Type":"ContainerDied","Data":"21e3c8fbc7e6bfda9b4b605241e4956e6a956e77f8785b627f52a8dc1ea45066"} Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.763270 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21e3c8fbc7e6bfda9b4b605241e4956e6a956e77f8785b627f52a8dc1ea45066" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.763265 4940 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.827168 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6b875cd8bd-csl9h"]
Nov 26 08:49:14 crc kubenswrapper[4940]: E1126 08:49:14.827850 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1111ad8a-5b85-45ea-9bf7-693eef0556db" containerName="placement-db-sync"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.827879 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1111ad8a-5b85-45ea-9bf7-693eef0556db" containerName="placement-db-sync"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.828159 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1111ad8a-5b85-45ea-9bf7-693eef0556db" containerName="placement-db-sync"
Nov 26 08:49:14 crc kubenswrapper[4940]: E1126 08:49:14.836140 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1111ad8a_5b85_45ea_9bf7_693eef0556db.slice\": RecentStats: unable to find data in memory cache]"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.842024 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6b875cd8bd-csl9h"]
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.842150 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6b875cd8bd-csl9h"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.850438 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.850770 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.851007 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hf7c5"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.889101 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d242927a-47af-43db-9ecb-e25ba58cb291-config-data\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.889211 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d242927a-47af-43db-9ecb-e25ba58cb291-scripts\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.889299 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d242927a-47af-43db-9ecb-e25ba58cb291-logs\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h"
Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.889526 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cscz\" (UniqueName: \"kubernetes.io/projected/d242927a-47af-43db-9ecb-e25ba58cb291-kube-api-access-7cscz\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h"
\"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.889736 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d242927a-47af-43db-9ecb-e25ba58cb291-combined-ca-bundle\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.991237 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d242927a-47af-43db-9ecb-e25ba58cb291-scripts\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.991340 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d242927a-47af-43db-9ecb-e25ba58cb291-logs\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.991423 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cscz\" (UniqueName: \"kubernetes.io/projected/d242927a-47af-43db-9ecb-e25ba58cb291-kube-api-access-7cscz\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.991492 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d242927a-47af-43db-9ecb-e25ba58cb291-combined-ca-bundle\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.991525 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d242927a-47af-43db-9ecb-e25ba58cb291-config-data\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.991962 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d242927a-47af-43db-9ecb-e25ba58cb291-logs\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.996147 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d242927a-47af-43db-9ecb-e25ba58cb291-combined-ca-bundle\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:14 crc kubenswrapper[4940]: I1126 08:49:14.996660 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d242927a-47af-43db-9ecb-e25ba58cb291-config-data\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:15 crc kubenswrapper[4940]: I1126 
Nov 26 08:49:15 crc kubenswrapper[4940]: I1126 08:49:15.019683 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cscz\" (UniqueName: \"kubernetes.io/projected/d242927a-47af-43db-9ecb-e25ba58cb291-kube-api-access-7cscz\") pod \"placement-6b875cd8bd-csl9h\" (UID: \"d242927a-47af-43db-9ecb-e25ba58cb291\") " pod="openstack/placement-6b875cd8bd-csl9h"
Nov 26 08:49:15 crc kubenswrapper[4940]: I1126 08:49:15.165368 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6b875cd8bd-csl9h"
Nov 26 08:49:15 crc kubenswrapper[4940]: I1126 08:49:15.622480 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6b875cd8bd-csl9h"]
Nov 26 08:49:15 crc kubenswrapper[4940]: I1126 08:49:15.772665 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6b875cd8bd-csl9h" event={"ID":"d242927a-47af-43db-9ecb-e25ba58cb291","Type":"ContainerStarted","Data":"259955ab3a11155de684d3480021dd98959f8c5be929a207c73fdd1bbee0255c"}
Nov 26 08:49:16 crc kubenswrapper[4940]: I1126 08:49:16.784125 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6b875cd8bd-csl9h" event={"ID":"d242927a-47af-43db-9ecb-e25ba58cb291","Type":"ContainerStarted","Data":"a3e5a0fa068aaaf0fae8a60b16320929a4a663ba7be99931b75c2a23e753cd28"}
Nov 26 08:49:16 crc kubenswrapper[4940]: I1126 08:49:16.784704 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6b875cd8bd-csl9h"
Nov 26 08:49:16 crc kubenswrapper[4940]: I1126 08:49:16.784720 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6b875cd8bd-csl9h"
Nov 26 08:49:16 crc kubenswrapper[4940]: I1126 08:49:16.784732 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6b875cd8bd-csl9h" event={"ID":"d242927a-47af-43db-9ecb-e25ba58cb291","Type":"ContainerStarted","Data":"76f58f70887a05183f8599b199fda7ddd47c5080bc16d6b3306e144570cf67ed"}
Nov 26 08:49:16 crc kubenswrapper[4940]: I1126 08:49:16.810125 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6b875cd8bd-csl9h" podStartSLOduration=2.810100931 podStartE2EDuration="2.810100931s" podCreationTimestamp="2025-11-26 08:49:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:49:16.804491723 +0000 UTC m=+6858.324633342" watchObservedRunningTime="2025-11-26 08:49:16.810100931 +0000 UTC m=+6858.330242560"
Nov 26 08:49:16 crc kubenswrapper[4940]: I1126 08:49:16.849243 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-c7d584995-74jn2"
Nov 26 08:49:16 crc kubenswrapper[4940]: I1126 08:49:16.911324 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f5647cf7c-cw4p8"]
Nov 26 08:49:16 crc kubenswrapper[4940]: I1126 08:49:16.911868 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" podUID="1846409b-94ff-442a-8021-cec25d90e9cc" containerName="dnsmasq-dns" containerID="cri-o://ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff" gracePeriod=10
containerID="cri-o://ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff" gracePeriod=10 Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.493808 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.656372 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-config\") pod \"1846409b-94ff-442a-8021-cec25d90e9cc\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.656579 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzpf2\" (UniqueName: \"kubernetes.io/projected/1846409b-94ff-442a-8021-cec25d90e9cc-kube-api-access-rzpf2\") pod \"1846409b-94ff-442a-8021-cec25d90e9cc\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.656731 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-nb\") pod \"1846409b-94ff-442a-8021-cec25d90e9cc\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.656821 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-sb\") pod \"1846409b-94ff-442a-8021-cec25d90e9cc\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.656888 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-dns-svc\") pod \"1846409b-94ff-442a-8021-cec25d90e9cc\" (UID: \"1846409b-94ff-442a-8021-cec25d90e9cc\") " Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.674865 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1846409b-94ff-442a-8021-cec25d90e9cc-kube-api-access-rzpf2" (OuterVolumeSpecName: "kube-api-access-rzpf2") pod "1846409b-94ff-442a-8021-cec25d90e9cc" (UID: "1846409b-94ff-442a-8021-cec25d90e9cc"). InnerVolumeSpecName "kube-api-access-rzpf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.717119 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-config" (OuterVolumeSpecName: "config") pod "1846409b-94ff-442a-8021-cec25d90e9cc" (UID: "1846409b-94ff-442a-8021-cec25d90e9cc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.718387 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1846409b-94ff-442a-8021-cec25d90e9cc" (UID: "1846409b-94ff-442a-8021-cec25d90e9cc"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.719747 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1846409b-94ff-442a-8021-cec25d90e9cc" (UID: "1846409b-94ff-442a-8021-cec25d90e9cc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.733780 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1846409b-94ff-442a-8021-cec25d90e9cc" (UID: "1846409b-94ff-442a-8021-cec25d90e9cc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.759288 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzpf2\" (UniqueName: \"kubernetes.io/projected/1846409b-94ff-442a-8021-cec25d90e9cc-kube-api-access-rzpf2\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.759323 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.759336 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.759350 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.759362 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1846409b-94ff-442a-8021-cec25d90e9cc-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.793966 4940 generic.go:334] "Generic (PLEG): container finished" podID="1846409b-94ff-442a-8021-cec25d90e9cc" containerID="ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff" exitCode=0 Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.794087 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" event={"ID":"1846409b-94ff-442a-8021-cec25d90e9cc","Type":"ContainerDied","Data":"ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff"} Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.794160 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8" event={"ID":"1846409b-94ff-442a-8021-cec25d90e9cc","Type":"ContainerDied","Data":"9c95800eb58370d048343b9e4f05d6d0ba802ccad6a7ff9ef6569461eba88772"} Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.794181 4940 scope.go:117] "RemoveContainer" containerID="ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff" Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.795194 4940 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.795194 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f5647cf7c-cw4p8"
Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.821098 4940 scope.go:117] "RemoveContainer" containerID="54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594"
Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.833401 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f5647cf7c-cw4p8"]
Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.840204 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f5647cf7c-cw4p8"]
Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.863707 4940 scope.go:117] "RemoveContainer" containerID="ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff"
Nov 26 08:49:17 crc kubenswrapper[4940]: E1126 08:49:17.865994 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff\": container with ID starting with ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff not found: ID does not exist" containerID="ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff"
Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.866032 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff"} err="failed to get container status \"ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff\": rpc error: code = NotFound desc = could not find container \"ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff\": container with ID starting with ef955943ea97e6583e949cdb0668fd2aa90d96d673f764dc8b2207c2072f25ff not found: ID does not exist"
Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.866085 4940 scope.go:117] "RemoveContainer" containerID="54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594"
Nov 26 08:49:17 crc kubenswrapper[4940]: E1126 08:49:17.866553 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594\": container with ID starting with 54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594 not found: ID does not exist" containerID="54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594"
Nov 26 08:49:17 crc kubenswrapper[4940]: I1126 08:49:17.866591 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594"} err="failed to get container status \"54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594\": rpc error: code = NotFound desc = could not find container \"54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594\": container with ID starting with 54df67add9cd07c050d0fee0f14cfe273fec838b4ae14d0d4c0e6801c9138594 not found: ID does not exist"
Nov 26 08:49:19 crc kubenswrapper[4940]: I1126 08:49:19.177845 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1846409b-94ff-442a-8021-cec25d90e9cc" path="/var/lib/kubelet/pods/1846409b-94ff-442a-8021-cec25d90e9cc/volumes"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.165882 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad"
Nov 26 08:49:21 crc kubenswrapper[4940]: E1126 08:49:21.166529 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.301843 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9xczk"]
Nov 26 08:49:21 crc kubenswrapper[4940]: E1126 08:49:21.302539 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1846409b-94ff-442a-8021-cec25d90e9cc" containerName="init"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.302574 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1846409b-94ff-442a-8021-cec25d90e9cc" containerName="init"
Nov 26 08:49:21 crc kubenswrapper[4940]: E1126 08:49:21.302613 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1846409b-94ff-442a-8021-cec25d90e9cc" containerName="dnsmasq-dns"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.302627 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1846409b-94ff-442a-8021-cec25d90e9cc" containerName="dnsmasq-dns"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.303082 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1846409b-94ff-442a-8021-cec25d90e9cc" containerName="dnsmasq-dns"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.305604 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.324547 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-catalog-content\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.324680 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9xczk"]
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.324745 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-utilities\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.324855 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxqzd\" (UniqueName: \"kubernetes.io/projected/37d49ccb-a923-4371-abd6-c62d9508010b-kube-api-access-hxqzd\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.426372 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-catalog-content\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.426697 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-utilities\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.426733 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxqzd\" (UniqueName: \"kubernetes.io/projected/37d49ccb-a923-4371-abd6-c62d9508010b-kube-api-access-hxqzd\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.427106 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-catalog-content\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.427219 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-utilities\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.445740 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxqzd\" (UniqueName: \"kubernetes.io/projected/37d49ccb-a923-4371-abd6-c62d9508010b-kube-api-access-hxqzd\") pod \"redhat-operators-9xczk\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") " pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:21 crc kubenswrapper[4940]: I1126 08:49:21.642475 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:22 crc kubenswrapper[4940]: I1126 08:49:22.162969 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9xczk"]
Nov 26 08:49:22 crc kubenswrapper[4940]: I1126 08:49:22.849743 4940 generic.go:334] "Generic (PLEG): container finished" podID="37d49ccb-a923-4371-abd6-c62d9508010b" containerID="d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519" exitCode=0
Nov 26 08:49:22 crc kubenswrapper[4940]: I1126 08:49:22.849847 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xczk" event={"ID":"37d49ccb-a923-4371-abd6-c62d9508010b","Type":"ContainerDied","Data":"d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519"}
Nov 26 08:49:22 crc kubenswrapper[4940]: I1126 08:49:22.850061 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xczk" event={"ID":"37d49ccb-a923-4371-abd6-c62d9508010b","Type":"ContainerStarted","Data":"67d111d1be0fa21c6806610c8dce59649fbf7cdb3170ee5824542a45aabc0917"}
Nov 26 08:49:23 crc kubenswrapper[4940]: I1126 08:49:23.862421 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xczk" event={"ID":"37d49ccb-a923-4371-abd6-c62d9508010b","Type":"ContainerStarted","Data":"e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462"}
Nov 26 08:49:24 crc kubenswrapper[4940]: I1126 08:49:24.872223 4940 generic.go:334] "Generic (PLEG): container finished" podID="37d49ccb-a923-4371-abd6-c62d9508010b" containerID="e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462" exitCode=0
Nov 26 08:49:24 crc kubenswrapper[4940]: I1126 08:49:24.872274 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xczk" event={"ID":"37d49ccb-a923-4371-abd6-c62d9508010b","Type":"ContainerDied","Data":"e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462"}
Nov 26 08:49:25 crc kubenswrapper[4940]: I1126 08:49:25.886941 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xczk" event={"ID":"37d49ccb-a923-4371-abd6-c62d9508010b","Type":"ContainerStarted","Data":"886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc"}
Nov 26 08:49:25 crc kubenswrapper[4940]: I1126 08:49:25.919384 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9xczk" podStartSLOduration=2.381048546 podStartE2EDuration="4.9193653s" podCreationTimestamp="2025-11-26 08:49:21 +0000 UTC" firstStartedPulling="2025-11-26 08:49:22.850927492 +0000 UTC m=+6864.371069121" lastFinishedPulling="2025-11-26 08:49:25.389244256 +0000 UTC m=+6866.909385875" observedRunningTime="2025-11-26 08:49:25.912927685 +0000 UTC m=+6867.433069314" watchObservedRunningTime="2025-11-26 08:49:25.9193653 +0000 UTC m=+6867.439506919"
Nov 26 08:49:31 crc kubenswrapper[4940]: I1126 08:49:31.643161 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:31 crc kubenswrapper[4940]: I1126 08:49:31.643882 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:31 crc kubenswrapper[4940]: I1126 08:49:31.693459 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:32 crc kubenswrapper[4940]: I1126 08:49:32.014410 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:32 crc kubenswrapper[4940]: I1126 08:49:32.087578 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9xczk"]
Nov 26 08:49:33 crc kubenswrapper[4940]: I1126 08:49:33.982071 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9xczk" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" containerName="registry-server" containerID="cri-o://886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc" gracePeriod=2
Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.510536 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9xczk"
Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.670029 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-catalog-content\") pod \"37d49ccb-a923-4371-abd6-c62d9508010b\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") "
Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.670161 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxqzd\" (UniqueName: \"kubernetes.io/projected/37d49ccb-a923-4371-abd6-c62d9508010b-kube-api-access-hxqzd\") pod \"37d49ccb-a923-4371-abd6-c62d9508010b\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") "
Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.670208 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-utilities\") pod \"37d49ccb-a923-4371-abd6-c62d9508010b\" (UID: \"37d49ccb-a923-4371-abd6-c62d9508010b\") "
Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.670952 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-utilities" (OuterVolumeSpecName: "utilities") pod "37d49ccb-a923-4371-abd6-c62d9508010b" (UID: "37d49ccb-a923-4371-abd6-c62d9508010b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.772915 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxqzd\" (UniqueName: \"kubernetes.io/projected/37d49ccb-a923-4371-abd6-c62d9508010b-kube-api-access-hxqzd\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.772952 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.997175 4940 generic.go:334] "Generic (PLEG): container finished" podID="37d49ccb-a923-4371-abd6-c62d9508010b" containerID="886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc" exitCode=0 Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.997224 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xczk" event={"ID":"37d49ccb-a923-4371-abd6-c62d9508010b","Type":"ContainerDied","Data":"886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc"} Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.997254 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9xczk" event={"ID":"37d49ccb-a923-4371-abd6-c62d9508010b","Type":"ContainerDied","Data":"67d111d1be0fa21c6806610c8dce59649fbf7cdb3170ee5824542a45aabc0917"} Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.997266 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9xczk" Nov 26 08:49:34 crc kubenswrapper[4940]: I1126 08:49:34.997273 4940 scope.go:117] "RemoveContainer" containerID="886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.025757 4940 scope.go:117] "RemoveContainer" containerID="e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.053181 4940 scope.go:117] "RemoveContainer" containerID="d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.090253 4940 scope.go:117] "RemoveContainer" containerID="886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc" Nov 26 08:49:35 crc kubenswrapper[4940]: E1126 08:49:35.090647 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc\": container with ID starting with 886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc not found: ID does not exist" containerID="886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.090694 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc"} err="failed to get container status \"886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc\": rpc error: code = NotFound desc = could not find container \"886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc\": container with ID starting with 886c37c86613bf0155f799b3c7d6ea7cb8df0aec7cf02cc752e30bb67f0d9bfc not found: ID does not exist" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.090722 4940 scope.go:117] 
"RemoveContainer" containerID="e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462" Nov 26 08:49:35 crc kubenswrapper[4940]: E1126 08:49:35.090983 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462\": container with ID starting with e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462 not found: ID does not exist" containerID="e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.091004 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462"} err="failed to get container status \"e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462\": rpc error: code = NotFound desc = could not find container \"e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462\": container with ID starting with e06c3d853797bc39fc29deddb1589230f83d733beccf4aa16076893721dc8462 not found: ID does not exist" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.091019 4940 scope.go:117] "RemoveContainer" containerID="d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519" Nov 26 08:49:35 crc kubenswrapper[4940]: E1126 08:49:35.091366 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519\": container with ID starting with d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519 not found: ID does not exist" containerID="d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.091398 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519"} err="failed to get container status \"d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519\": rpc error: code = NotFound desc = could not find container \"d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519\": container with ID starting with d721f95de87a1a245b192cdd42e82f8ec5de2768e5fe1efe68d71274809bc519 not found: ID does not exist" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.175539 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37d49ccb-a923-4371-abd6-c62d9508010b" (UID: "37d49ccb-a923-4371-abd6-c62d9508010b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.179169 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37d49ccb-a923-4371-abd6-c62d9508010b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:49:35 crc kubenswrapper[4940]: E1126 08:49:35.335255 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37d49ccb_a923_4371_abd6_c62d9508010b.slice/crio-67d111d1be0fa21c6806610c8dce59649fbf7cdb3170ee5824542a45aabc0917\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37d49ccb_a923_4371_abd6_c62d9508010b.slice\": RecentStats: unable to find data in memory cache]" Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.337995 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9xczk"] Nov 26 08:49:35 crc kubenswrapper[4940]: I1126 08:49:35.346636 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9xczk"] Nov 26 08:49:36 crc kubenswrapper[4940]: I1126 08:49:36.165302 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:49:36 crc kubenswrapper[4940]: E1126 08:49:36.166032 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:49:37 crc kubenswrapper[4940]: I1126 08:49:37.178517 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" path="/var/lib/kubelet/pods/37d49ccb-a923-4371-abd6-c62d9508010b/volumes" Nov 26 08:49:46 crc kubenswrapper[4940]: I1126 08:49:46.189455 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:46 crc kubenswrapper[4940]: I1126 08:49:46.351233 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6b875cd8bd-csl9h" Nov 26 08:49:47 crc kubenswrapper[4940]: I1126 08:49:47.165913 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:49:47 crc kubenswrapper[4940]: E1126 08:49:47.166399 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:50:01 crc kubenswrapper[4940]: I1126 08:50:01.165803 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:50:01 crc kubenswrapper[4940]: E1126 08:50:01.166481 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
Nov 26 08:50:01 crc kubenswrapper[4940]: E1126 08:50:01.166481 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 08:50:09 crc kubenswrapper[4940]: I1126 08:50:09.889448 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-m4qtp"]
Nov 26 08:50:09 crc kubenswrapper[4940]: E1126 08:50:09.890472 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" containerName="extract-content"
Nov 26 08:50:09 crc kubenswrapper[4940]: I1126 08:50:09.890512 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" containerName="extract-content"
Nov 26 08:50:09 crc kubenswrapper[4940]: E1126 08:50:09.890531 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" containerName="registry-server"
Nov 26 08:50:09 crc kubenswrapper[4940]: I1126 08:50:09.890539 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" containerName="registry-server"
Nov 26 08:50:09 crc kubenswrapper[4940]: E1126 08:50:09.890566 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" containerName="extract-utilities"
Nov 26 08:50:09 crc kubenswrapper[4940]: I1126 08:50:09.890575 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" containerName="extract-utilities"
Nov 26 08:50:09 crc kubenswrapper[4940]: I1126 08:50:09.890795 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="37d49ccb-a923-4371-abd6-c62d9508010b" containerName="registry-server"
Nov 26 08:50:09 crc kubenswrapper[4940]: I1126 08:50:09.891523 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-m4qtp"
Nov 26 08:50:09 crc kubenswrapper[4940]: I1126 08:50:09.901667 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-m4qtp"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.013120 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-xmng9"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.029324 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-c73b-account-create-update-9fgjt"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.035555 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c73b-account-create-update-9fgjt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.035827 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-xmng9"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.045472 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.061869 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-xmng9"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.067239 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9813b0a0-b69f-4db5-8746-50637c407ca5-operator-scripts\") pod \"nova-api-db-create-m4qtp\" (UID: \"9813b0a0-b69f-4db5-8746-50637c407ca5\") " pod="openstack/nova-api-db-create-m4qtp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.067698 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t85x\" (UniqueName: \"kubernetes.io/projected/9813b0a0-b69f-4db5-8746-50637c407ca5-kube-api-access-8t85x\") pod \"nova-api-db-create-m4qtp\" (UID: \"9813b0a0-b69f-4db5-8746-50637c407ca5\") " pod="openstack/nova-api-db-create-m4qtp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.077937 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c73b-account-create-update-9fgjt"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.091832 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-pltkp"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.099214 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pltkp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.120908 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-pltkp"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.135543 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-448f-account-create-update-tdw5k"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.136879 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-448f-account-create-update-tdw5k"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.139877 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.151152 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-448f-account-create-update-tdw5k"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.169348 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vvzj\" (UniqueName: \"kubernetes.io/projected/f271bc7c-8da3-4fdf-b77c-498d89760a85-kube-api-access-9vvzj\") pod \"nova-cell0-db-create-xmng9\" (UID: \"f271bc7c-8da3-4fdf-b77c-498d89760a85\") " pod="openstack/nova-cell0-db-create-xmng9"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.169433 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2p6p8\" (UniqueName: \"kubernetes.io/projected/3c9f8615-a457-4f58-921f-41e784d31923-kube-api-access-2p6p8\") pod \"nova-api-c73b-account-create-update-9fgjt\" (UID: \"3c9f8615-a457-4f58-921f-41e784d31923\") " pod="openstack/nova-api-c73b-account-create-update-9fgjt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.169548 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t85x\" (UniqueName: \"kubernetes.io/projected/9813b0a0-b69f-4db5-8746-50637c407ca5-kube-api-access-8t85x\") pod \"nova-api-db-create-m4qtp\" (UID: \"9813b0a0-b69f-4db5-8746-50637c407ca5\") " pod="openstack/nova-api-db-create-m4qtp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.169628 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9813b0a0-b69f-4db5-8746-50637c407ca5-operator-scripts\") pod \"nova-api-db-create-m4qtp\" (UID: \"9813b0a0-b69f-4db5-8746-50637c407ca5\") " pod="openstack/nova-api-db-create-m4qtp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.169693 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f271bc7c-8da3-4fdf-b77c-498d89760a85-operator-scripts\") pod \"nova-cell0-db-create-xmng9\" (UID: \"f271bc7c-8da3-4fdf-b77c-498d89760a85\") " pod="openstack/nova-cell0-db-create-xmng9"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.169814 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c9f8615-a457-4f58-921f-41e784d31923-operator-scripts\") pod \"nova-api-c73b-account-create-update-9fgjt\" (UID: \"3c9f8615-a457-4f58-921f-41e784d31923\") " pod="openstack/nova-api-c73b-account-create-update-9fgjt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.170311 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9813b0a0-b69f-4db5-8746-50637c407ca5-operator-scripts\") pod \"nova-api-db-create-m4qtp\" (UID: \"9813b0a0-b69f-4db5-8746-50637c407ca5\") " pod="openstack/nova-api-db-create-m4qtp"
\"kubernetes.io/projected/9813b0a0-b69f-4db5-8746-50637c407ca5-kube-api-access-8t85x\") pod \"nova-api-db-create-m4qtp\" (UID: \"9813b0a0-b69f-4db5-8746-50637c407ca5\") " pod="openstack/nova-api-db-create-m4qtp" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.222247 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-m4qtp" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.271024 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c9f8615-a457-4f58-921f-41e784d31923-operator-scripts\") pod \"nova-api-c73b-account-create-update-9fgjt\" (UID: \"3c9f8615-a457-4f58-921f-41e784d31923\") " pod="openstack/nova-api-c73b-account-create-update-9fgjt" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.271594 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vvzj\" (UniqueName: \"kubernetes.io/projected/f271bc7c-8da3-4fdf-b77c-498d89760a85-kube-api-access-9vvzj\") pod \"nova-cell0-db-create-xmng9\" (UID: \"f271bc7c-8da3-4fdf-b77c-498d89760a85\") " pod="openstack/nova-cell0-db-create-xmng9" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.271693 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c9f8615-a457-4f58-921f-41e784d31923-operator-scripts\") pod \"nova-api-c73b-account-create-update-9fgjt\" (UID: \"3c9f8615-a457-4f58-921f-41e784d31923\") " pod="openstack/nova-api-c73b-account-create-update-9fgjt" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.271902 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2p6p8\" (UniqueName: \"kubernetes.io/projected/3c9f8615-a457-4f58-921f-41e784d31923-kube-api-access-2p6p8\") pod \"nova-api-c73b-account-create-update-9fgjt\" (UID: \"3c9f8615-a457-4f58-921f-41e784d31923\") " pod="openstack/nova-api-c73b-account-create-update-9fgjt" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.271981 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xdw2\" (UniqueName: \"kubernetes.io/projected/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-kube-api-access-9xdw2\") pod \"nova-cell0-448f-account-create-update-tdw5k\" (UID: \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\") " pod="openstack/nova-cell0-448f-account-create-update-tdw5k" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.272050 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-operator-scripts\") pod \"nova-cell0-448f-account-create-update-tdw5k\" (UID: \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\") " pod="openstack/nova-cell0-448f-account-create-update-tdw5k" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.272072 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46ln9\" (UniqueName: \"kubernetes.io/projected/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-kube-api-access-46ln9\") pod \"nova-cell1-db-create-pltkp\" (UID: \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\") " pod="openstack/nova-cell1-db-create-pltkp" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.272107 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" 
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.272107 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-operator-scripts\") pod \"nova-cell1-db-create-pltkp\" (UID: \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\") " pod="openstack/nova-cell1-db-create-pltkp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.272137 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f271bc7c-8da3-4fdf-b77c-498d89760a85-operator-scripts\") pod \"nova-cell0-db-create-xmng9\" (UID: \"f271bc7c-8da3-4fdf-b77c-498d89760a85\") " pod="openstack/nova-cell0-db-create-xmng9"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.273786 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f271bc7c-8da3-4fdf-b77c-498d89760a85-operator-scripts\") pod \"nova-cell0-db-create-xmng9\" (UID: \"f271bc7c-8da3-4fdf-b77c-498d89760a85\") " pod="openstack/nova-cell0-db-create-xmng9"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.293277 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vvzj\" (UniqueName: \"kubernetes.io/projected/f271bc7c-8da3-4fdf-b77c-498d89760a85-kube-api-access-9vvzj\") pod \"nova-cell0-db-create-xmng9\" (UID: \"f271bc7c-8da3-4fdf-b77c-498d89760a85\") " pod="openstack/nova-cell0-db-create-xmng9"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.301371 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2p6p8\" (UniqueName: \"kubernetes.io/projected/3c9f8615-a457-4f58-921f-41e784d31923-kube-api-access-2p6p8\") pod \"nova-api-c73b-account-create-update-9fgjt\" (UID: \"3c9f8615-a457-4f58-921f-41e784d31923\") " pod="openstack/nova-api-c73b-account-create-update-9fgjt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.302941 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-67ea-account-create-update-p6wmt"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.304191 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-67ea-account-create-update-p6wmt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.306750 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.335708 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-67ea-account-create-update-p6wmt"]
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.376629 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xdw2\" (UniqueName: \"kubernetes.io/projected/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-kube-api-access-9xdw2\") pod \"nova-cell0-448f-account-create-update-tdw5k\" (UID: \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\") " pod="openstack/nova-cell0-448f-account-create-update-tdw5k"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.376709 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-operator-scripts\") pod \"nova-cell0-448f-account-create-update-tdw5k\" (UID: \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\") " pod="openstack/nova-cell0-448f-account-create-update-tdw5k"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.376742 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46ln9\" (UniqueName: \"kubernetes.io/projected/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-kube-api-access-46ln9\") pod \"nova-cell1-db-create-pltkp\" (UID: \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\") " pod="openstack/nova-cell1-db-create-pltkp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.376779 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-operator-scripts\") pod \"nova-cell1-db-create-pltkp\" (UID: \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\") " pod="openstack/nova-cell1-db-create-pltkp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.377728 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-operator-scripts\") pod \"nova-cell1-db-create-pltkp\" (UID: \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\") " pod="openstack/nova-cell1-db-create-pltkp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.379548 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-operator-scripts\") pod \"nova-cell0-448f-account-create-update-tdw5k\" (UID: \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\") " pod="openstack/nova-cell0-448f-account-create-update-tdw5k"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.385131 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c73b-account-create-update-9fgjt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.398630 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xdw2\" (UniqueName: \"kubernetes.io/projected/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-kube-api-access-9xdw2\") pod \"nova-cell0-448f-account-create-update-tdw5k\" (UID: \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\") " pod="openstack/nova-cell0-448f-account-create-update-tdw5k"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.410409 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-xmng9"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.411703 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46ln9\" (UniqueName: \"kubernetes.io/projected/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-kube-api-access-46ln9\") pod \"nova-cell1-db-create-pltkp\" (UID: \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\") " pod="openstack/nova-cell1-db-create-pltkp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.419957 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pltkp"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.455803 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-448f-account-create-update-tdw5k"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.478238 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfdb0c64-4bf2-444d-b6a1-32989360a09e-operator-scripts\") pod \"nova-cell1-67ea-account-create-update-p6wmt\" (UID: \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\") " pod="openstack/nova-cell1-67ea-account-create-update-p6wmt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.478341 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh9qg\" (UniqueName: \"kubernetes.io/projected/dfdb0c64-4bf2-444d-b6a1-32989360a09e-kube-api-access-qh9qg\") pod \"nova-cell1-67ea-account-create-update-p6wmt\" (UID: \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\") " pod="openstack/nova-cell1-67ea-account-create-update-p6wmt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.579995 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfdb0c64-4bf2-444d-b6a1-32989360a09e-operator-scripts\") pod \"nova-cell1-67ea-account-create-update-p6wmt\" (UID: \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\") " pod="openstack/nova-cell1-67ea-account-create-update-p6wmt"
Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.580096 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh9qg\" (UniqueName: \"kubernetes.io/projected/dfdb0c64-4bf2-444d-b6a1-32989360a09e-kube-api-access-qh9qg\") pod \"nova-cell1-67ea-account-create-update-p6wmt\" (UID: \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\") " pod="openstack/nova-cell1-67ea-account-create-update-p6wmt"
\"dfdb0c64-4bf2-444d-b6a1-32989360a09e\") " pod="openstack/nova-cell1-67ea-account-create-update-p6wmt" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.602576 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh9qg\" (UniqueName: \"kubernetes.io/projected/dfdb0c64-4bf2-444d-b6a1-32989360a09e-kube-api-access-qh9qg\") pod \"nova-cell1-67ea-account-create-update-p6wmt\" (UID: \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\") " pod="openstack/nova-cell1-67ea-account-create-update-p6wmt" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.676197 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-67ea-account-create-update-p6wmt" Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.727084 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-m4qtp"] Nov 26 08:50:10 crc kubenswrapper[4940]: I1126 08:50:10.877581 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c73b-account-create-update-9fgjt"] Nov 26 08:50:10 crc kubenswrapper[4940]: W1126 08:50:10.884259 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c9f8615_a457_4f58_921f_41e784d31923.slice/crio-af3f8a61ff246f7812dbe7fe6ddbc688481634dd18bae61ddd518ebacc47db64 WatchSource:0}: Error finding container af3f8a61ff246f7812dbe7fe6ddbc688481634dd18bae61ddd518ebacc47db64: Status 404 returned error can't find the container with id af3f8a61ff246f7812dbe7fe6ddbc688481634dd18bae61ddd518ebacc47db64 Nov 26 08:50:11 crc kubenswrapper[4940]: W1126 08:50:11.061257 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf271bc7c_8da3_4fdf_b77c_498d89760a85.slice/crio-5f707d13a5aca51d4c6ea48c2fc3cbffd710e65965adec43e207f042a6f2183c WatchSource:0}: Error finding container 5f707d13a5aca51d4c6ea48c2fc3cbffd710e65965adec43e207f042a6f2183c: Status 404 returned error can't find the container with id 5f707d13a5aca51d4c6ea48c2fc3cbffd710e65965adec43e207f042a6f2183c Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.061805 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-xmng9"] Nov 26 08:50:11 crc kubenswrapper[4940]: W1126 08:50:11.072766 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8cdf461f_a5ee_47a9_95d7_dcb4aef280b5.slice/crio-dacc9471ca95ac7f7c52ebde9eaff534b8a2558851bb1140022324850aff4201 WatchSource:0}: Error finding container dacc9471ca95ac7f7c52ebde9eaff534b8a2558851bb1140022324850aff4201: Status 404 returned error can't find the container with id dacc9471ca95ac7f7c52ebde9eaff534b8a2558851bb1140022324850aff4201 Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.077623 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-pltkp"] Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.157177 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-448f-account-create-update-tdw5k"] Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.256858 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-67ea-account-create-update-p6wmt"] Nov 26 08:50:11 crc kubenswrapper[4940]: W1126 08:50:11.263029 4940 manager.go:1169] Failed to process watch event {EventType:0 
Nov 26 08:50:11 crc kubenswrapper[4940]: W1126 08:50:11.263029 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfdb0c64_4bf2_444d_b6a1_32989360a09e.slice/crio-0b44b14e87b2ab6e65116db4699b8d2b2afbcb546c84b0cd709e78b0fa4aee59 WatchSource:0}: Error finding container 0b44b14e87b2ab6e65116db4699b8d2b2afbcb546c84b0cd709e78b0fa4aee59: Status 404 returned error can't find the container with id 0b44b14e87b2ab6e65116db4699b8d2b2afbcb546c84b0cd709e78b0fa4aee59
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.354594 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-448f-account-create-update-tdw5k" event={"ID":"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3","Type":"ContainerStarted","Data":"b1bc7ae7c64195634f4dccfc4ecbe049246761cea57dcc4a52546cb5febdb1ab"}
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.356318 4940 generic.go:334] "Generic (PLEG): container finished" podID="3c9f8615-a457-4f58-921f-41e784d31923" containerID="d816dc9797ef473a1ad0ca1c1ae50e879895ae27ec619dc4679d86bcc206b6eb" exitCode=0
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.356407 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c73b-account-create-update-9fgjt" event={"ID":"3c9f8615-a457-4f58-921f-41e784d31923","Type":"ContainerDied","Data":"d816dc9797ef473a1ad0ca1c1ae50e879895ae27ec619dc4679d86bcc206b6eb"}
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.356499 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c73b-account-create-update-9fgjt" event={"ID":"3c9f8615-a457-4f58-921f-41e784d31923","Type":"ContainerStarted","Data":"af3f8a61ff246f7812dbe7fe6ddbc688481634dd18bae61ddd518ebacc47db64"}
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.357891 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pltkp" event={"ID":"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5","Type":"ContainerStarted","Data":"dacc9471ca95ac7f7c52ebde9eaff534b8a2558851bb1140022324850aff4201"}
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.360213 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-xmng9" event={"ID":"f271bc7c-8da3-4fdf-b77c-498d89760a85","Type":"ContainerStarted","Data":"29206dd1aaafe63d76a228cc43ca79decd663b97e9e163598ef896d4ddd014d8"}
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.360244 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-xmng9" event={"ID":"f271bc7c-8da3-4fdf-b77c-498d89760a85","Type":"ContainerStarted","Data":"5f707d13a5aca51d4c6ea48c2fc3cbffd710e65965adec43e207f042a6f2183c"}
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.361772 4940 generic.go:334] "Generic (PLEG): container finished" podID="9813b0a0-b69f-4db5-8746-50637c407ca5" containerID="b76f3f48e3ee53393e767a51126722cb62c44bb534b50d016619021234eeb42c" exitCode=0
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.361847 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-m4qtp" event={"ID":"9813b0a0-b69f-4db5-8746-50637c407ca5","Type":"ContainerDied","Data":"b76f3f48e3ee53393e767a51126722cb62c44bb534b50d016619021234eeb42c"}
Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.361875 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-m4qtp" event={"ID":"9813b0a0-b69f-4db5-8746-50637c407ca5","Type":"ContainerStarted","Data":"c1bf1e4dc80e9d7f3836a9d7c27aaf288247d73f1610e79f14f01f9b10e008f7"}
"SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-67ea-account-create-update-p6wmt" event={"ID":"dfdb0c64-4bf2-444d-b6a1-32989360a09e","Type":"ContainerStarted","Data":"0b44b14e87b2ab6e65116db4699b8d2b2afbcb546c84b0cd709e78b0fa4aee59"} Nov 26 08:50:11 crc kubenswrapper[4940]: I1126 08:50:11.390061 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-xmng9" podStartSLOduration=2.390033717 podStartE2EDuration="2.390033717s" podCreationTimestamp="2025-11-26 08:50:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:11.383933262 +0000 UTC m=+6912.904074881" watchObservedRunningTime="2025-11-26 08:50:11.390033717 +0000 UTC m=+6912.910175336" Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.378825 4940 generic.go:334] "Generic (PLEG): container finished" podID="8cdf461f-a5ee-47a9-95d7-dcb4aef280b5" containerID="4fbe1f5b41756c2cd41088fdf944b051d0264ba0d6a46c302b279e60891a86a8" exitCode=0 Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.378903 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pltkp" event={"ID":"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5","Type":"ContainerDied","Data":"4fbe1f5b41756c2cd41088fdf944b051d0264ba0d6a46c302b279e60891a86a8"} Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.382621 4940 generic.go:334] "Generic (PLEG): container finished" podID="f271bc7c-8da3-4fdf-b77c-498d89760a85" containerID="29206dd1aaafe63d76a228cc43ca79decd663b97e9e163598ef896d4ddd014d8" exitCode=0 Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.382765 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-xmng9" event={"ID":"f271bc7c-8da3-4fdf-b77c-498d89760a85","Type":"ContainerDied","Data":"29206dd1aaafe63d76a228cc43ca79decd663b97e9e163598ef896d4ddd014d8"} Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.384958 4940 generic.go:334] "Generic (PLEG): container finished" podID="dfdb0c64-4bf2-444d-b6a1-32989360a09e" containerID="740c51b8315bfb8a669034ad2f9d328c6e593c8ba897326f821dc35384d080ba" exitCode=0 Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.385064 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-67ea-account-create-update-p6wmt" event={"ID":"dfdb0c64-4bf2-444d-b6a1-32989360a09e","Type":"ContainerDied","Data":"740c51b8315bfb8a669034ad2f9d328c6e593c8ba897326f821dc35384d080ba"} Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.387345 4940 generic.go:334] "Generic (PLEG): container finished" podID="7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3" containerID="837ce6010df320a7ed19500c3ba21731eed93c1b2a9d9c30b1238a7e61cc5df0" exitCode=0 Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.387412 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-448f-account-create-update-tdw5k" event={"ID":"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3","Type":"ContainerDied","Data":"837ce6010df320a7ed19500c3ba21731eed93c1b2a9d9c30b1238a7e61cc5df0"} Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.819677 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-m4qtp" Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.830745 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c73b-account-create-update-9fgjt" Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.930026 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9813b0a0-b69f-4db5-8746-50637c407ca5-operator-scripts\") pod \"9813b0a0-b69f-4db5-8746-50637c407ca5\" (UID: \"9813b0a0-b69f-4db5-8746-50637c407ca5\") " Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.930262 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8t85x\" (UniqueName: \"kubernetes.io/projected/9813b0a0-b69f-4db5-8746-50637c407ca5-kube-api-access-8t85x\") pod \"9813b0a0-b69f-4db5-8746-50637c407ca5\" (UID: \"9813b0a0-b69f-4db5-8746-50637c407ca5\") " Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.930309 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c9f8615-a457-4f58-921f-41e784d31923-operator-scripts\") pod \"3c9f8615-a457-4f58-921f-41e784d31923\" (UID: \"3c9f8615-a457-4f58-921f-41e784d31923\") " Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.930348 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2p6p8\" (UniqueName: \"kubernetes.io/projected/3c9f8615-a457-4f58-921f-41e784d31923-kube-api-access-2p6p8\") pod \"3c9f8615-a457-4f58-921f-41e784d31923\" (UID: \"3c9f8615-a457-4f58-921f-41e784d31923\") " Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.930589 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9813b0a0-b69f-4db5-8746-50637c407ca5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9813b0a0-b69f-4db5-8746-50637c407ca5" (UID: "9813b0a0-b69f-4db5-8746-50637c407ca5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.930743 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c9f8615-a457-4f58-921f-41e784d31923-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3c9f8615-a457-4f58-921f-41e784d31923" (UID: "3c9f8615-a457-4f58-921f-41e784d31923"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.931124 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c9f8615-a457-4f58-921f-41e784d31923-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.931145 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9813b0a0-b69f-4db5-8746-50637c407ca5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.939834 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c9f8615-a457-4f58-921f-41e784d31923-kube-api-access-2p6p8" (OuterVolumeSpecName: "kube-api-access-2p6p8") pod "3c9f8615-a457-4f58-921f-41e784d31923" (UID: "3c9f8615-a457-4f58-921f-41e784d31923"). InnerVolumeSpecName "kube-api-access-2p6p8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:12 crc kubenswrapper[4940]: I1126 08:50:12.939904 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9813b0a0-b69f-4db5-8746-50637c407ca5-kube-api-access-8t85x" (OuterVolumeSpecName: "kube-api-access-8t85x") pod "9813b0a0-b69f-4db5-8746-50637c407ca5" (UID: "9813b0a0-b69f-4db5-8746-50637c407ca5"). InnerVolumeSpecName "kube-api-access-8t85x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.032651 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8t85x\" (UniqueName: \"kubernetes.io/projected/9813b0a0-b69f-4db5-8746-50637c407ca5-kube-api-access-8t85x\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.032678 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2p6p8\" (UniqueName: \"kubernetes.io/projected/3c9f8615-a457-4f58-921f-41e784d31923-kube-api-access-2p6p8\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.402742 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c73b-account-create-update-9fgjt" event={"ID":"3c9f8615-a457-4f58-921f-41e784d31923","Type":"ContainerDied","Data":"af3f8a61ff246f7812dbe7fe6ddbc688481634dd18bae61ddd518ebacc47db64"} Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.402814 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af3f8a61ff246f7812dbe7fe6ddbc688481634dd18bae61ddd518ebacc47db64" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.402773 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c73b-account-create-update-9fgjt" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.405870 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-m4qtp" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.406139 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-m4qtp" event={"ID":"9813b0a0-b69f-4db5-8746-50637c407ca5","Type":"ContainerDied","Data":"c1bf1e4dc80e9d7f3836a9d7c27aaf288247d73f1610e79f14f01f9b10e008f7"} Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.406195 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1bf1e4dc80e9d7f3836a9d7c27aaf288247d73f1610e79f14f01f9b10e008f7" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.779554 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-pltkp" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.955420 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-operator-scripts\") pod \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\" (UID: \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\") " Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.955486 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46ln9\" (UniqueName: \"kubernetes.io/projected/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-kube-api-access-46ln9\") pod \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\" (UID: \"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5\") " Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.956860 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8cdf461f-a5ee-47a9-95d7-dcb4aef280b5" (UID: "8cdf461f-a5ee-47a9-95d7-dcb4aef280b5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.963210 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-67ea-account-create-update-p6wmt" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.964216 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-kube-api-access-46ln9" (OuterVolumeSpecName: "kube-api-access-46ln9") pod "8cdf461f-a5ee-47a9-95d7-dcb4aef280b5" (UID: "8cdf461f-a5ee-47a9-95d7-dcb4aef280b5"). InnerVolumeSpecName "kube-api-access-46ln9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.969526 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-xmng9" Nov 26 08:50:13 crc kubenswrapper[4940]: I1126 08:50:13.976269 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-448f-account-create-update-tdw5k" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.057513 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.057548 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46ln9\" (UniqueName: \"kubernetes.io/projected/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5-kube-api-access-46ln9\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.159543 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfdb0c64-4bf2-444d-b6a1-32989360a09e-operator-scripts\") pod \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\" (UID: \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\") " Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.159621 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vvzj\" (UniqueName: \"kubernetes.io/projected/f271bc7c-8da3-4fdf-b77c-498d89760a85-kube-api-access-9vvzj\") pod \"f271bc7c-8da3-4fdf-b77c-498d89760a85\" (UID: \"f271bc7c-8da3-4fdf-b77c-498d89760a85\") " Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.159656 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f271bc7c-8da3-4fdf-b77c-498d89760a85-operator-scripts\") pod \"f271bc7c-8da3-4fdf-b77c-498d89760a85\" (UID: \"f271bc7c-8da3-4fdf-b77c-498d89760a85\") " Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.159718 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qh9qg\" (UniqueName: \"kubernetes.io/projected/dfdb0c64-4bf2-444d-b6a1-32989360a09e-kube-api-access-qh9qg\") pod \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\" (UID: \"dfdb0c64-4bf2-444d-b6a1-32989360a09e\") " Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.159773 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xdw2\" (UniqueName: \"kubernetes.io/projected/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-kube-api-access-9xdw2\") pod \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\" (UID: \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\") " Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.159814 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-operator-scripts\") pod \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\" (UID: \"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3\") " Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.160236 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfdb0c64-4bf2-444d-b6a1-32989360a09e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dfdb0c64-4bf2-444d-b6a1-32989360a09e" (UID: "dfdb0c64-4bf2-444d-b6a1-32989360a09e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.160331 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f271bc7c-8da3-4fdf-b77c-498d89760a85-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f271bc7c-8da3-4fdf-b77c-498d89760a85" (UID: "f271bc7c-8da3-4fdf-b77c-498d89760a85"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.160805 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3" (UID: "7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.161171 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dfdb0c64-4bf2-444d-b6a1-32989360a09e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.161213 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f271bc7c-8da3-4fdf-b77c-498d89760a85-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.161235 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.163636 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-kube-api-access-9xdw2" (OuterVolumeSpecName: "kube-api-access-9xdw2") pod "7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3" (UID: "7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3"). InnerVolumeSpecName "kube-api-access-9xdw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.164022 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f271bc7c-8da3-4fdf-b77c-498d89760a85-kube-api-access-9vvzj" (OuterVolumeSpecName: "kube-api-access-9vvzj") pod "f271bc7c-8da3-4fdf-b77c-498d89760a85" (UID: "f271bc7c-8da3-4fdf-b77c-498d89760a85"). InnerVolumeSpecName "kube-api-access-9vvzj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.168090 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfdb0c64-4bf2-444d-b6a1-32989360a09e-kube-api-access-qh9qg" (OuterVolumeSpecName: "kube-api-access-qh9qg") pod "dfdb0c64-4bf2-444d-b6a1-32989360a09e" (UID: "dfdb0c64-4bf2-444d-b6a1-32989360a09e"). InnerVolumeSpecName "kube-api-access-qh9qg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.264022 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qh9qg\" (UniqueName: \"kubernetes.io/projected/dfdb0c64-4bf2-444d-b6a1-32989360a09e-kube-api-access-qh9qg\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.264125 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xdw2\" (UniqueName: \"kubernetes.io/projected/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3-kube-api-access-9xdw2\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.264149 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vvzj\" (UniqueName: \"kubernetes.io/projected/f271bc7c-8da3-4fdf-b77c-498d89760a85-kube-api-access-9vvzj\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.421641 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-xmng9" event={"ID":"f271bc7c-8da3-4fdf-b77c-498d89760a85","Type":"ContainerDied","Data":"5f707d13a5aca51d4c6ea48c2fc3cbffd710e65965adec43e207f042a6f2183c"} Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.421702 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f707d13a5aca51d4c6ea48c2fc3cbffd710e65965adec43e207f042a6f2183c" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.421703 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-xmng9" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.424629 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-67ea-account-create-update-p6wmt" event={"ID":"dfdb0c64-4bf2-444d-b6a1-32989360a09e","Type":"ContainerDied","Data":"0b44b14e87b2ab6e65116db4699b8d2b2afbcb546c84b0cd709e78b0fa4aee59"} Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.424691 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b44b14e87b2ab6e65116db4699b8d2b2afbcb546c84b0cd709e78b0fa4aee59" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.424650 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-67ea-account-create-update-p6wmt" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.427559 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-448f-account-create-update-tdw5k" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.427591 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-448f-account-create-update-tdw5k" event={"ID":"7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3","Type":"ContainerDied","Data":"b1bc7ae7c64195634f4dccfc4ecbe049246761cea57dcc4a52546cb5febdb1ab"} Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.427655 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1bc7ae7c64195634f4dccfc4ecbe049246761cea57dcc4a52546cb5febdb1ab" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.430479 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pltkp" event={"ID":"8cdf461f-a5ee-47a9-95d7-dcb4aef280b5","Type":"ContainerDied","Data":"dacc9471ca95ac7f7c52ebde9eaff534b8a2558851bb1140022324850aff4201"} Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.430513 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dacc9471ca95ac7f7c52ebde9eaff534b8a2558851bb1140022324850aff4201" Nov 26 08:50:14 crc kubenswrapper[4940]: I1126 08:50:14.430607 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pltkp" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.283241 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xktpm"] Nov 26 08:50:15 crc kubenswrapper[4940]: E1126 08:50:15.288229 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9813b0a0-b69f-4db5-8746-50637c407ca5" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.288463 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9813b0a0-b69f-4db5-8746-50637c407ca5" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: E1126 08:50:15.288556 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.288631 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: E1126 08:50:15.288762 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cdf461f-a5ee-47a9-95d7-dcb4aef280b5" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.288840 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cdf461f-a5ee-47a9-95d7-dcb4aef280b5" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: E1126 08:50:15.288945 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c9f8615-a457-4f58-921f-41e784d31923" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.289026 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c9f8615-a457-4f58-921f-41e784d31923" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: E1126 08:50:15.289125 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfdb0c64-4bf2-444d-b6a1-32989360a09e" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.289187 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="dfdb0c64-4bf2-444d-b6a1-32989360a09e" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: E1126 08:50:15.289432 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f271bc7c-8da3-4fdf-b77c-498d89760a85" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.289512 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f271bc7c-8da3-4fdf-b77c-498d89760a85" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.289902 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.290001 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c9f8615-a457-4f58-921f-41e784d31923" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.290115 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9813b0a0-b69f-4db5-8746-50637c407ca5" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.290205 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f271bc7c-8da3-4fdf-b77c-498d89760a85" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.290307 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfdb0c64-4bf2-444d-b6a1-32989360a09e" containerName="mariadb-account-create-update" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.290412 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cdf461f-a5ee-47a9-95d7-dcb4aef280b5" containerName="mariadb-database-create" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.291301 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.295732 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.295735 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.296062 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-2kkqf" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.304248 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xktpm"] Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.490734 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-scripts\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.490881 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69sgx\" (UniqueName: \"kubernetes.io/projected/8db993ee-766e-4c04-a4a3-8e6d1051101d-kube-api-access-69sgx\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.491019 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-config-data\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.491142 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.592974 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-scripts\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.593024 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69sgx\" (UniqueName: \"kubernetes.io/projected/8db993ee-766e-4c04-a4a3-8e6d1051101d-kube-api-access-69sgx\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.593092 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-config-data\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: 
\"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.593165 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.597086 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.597434 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-config-data\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.598584 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-scripts\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.608404 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69sgx\" (UniqueName: \"kubernetes.io/projected/8db993ee-766e-4c04-a4a3-8e6d1051101d-kube-api-access-69sgx\") pod \"nova-cell0-conductor-db-sync-xktpm\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:15 crc kubenswrapper[4940]: I1126 08:50:15.611531 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:16 crc kubenswrapper[4940]: W1126 08:50:16.086305 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8db993ee_766e_4c04_a4a3_8e6d1051101d.slice/crio-8e27f5f43701323c4c809be210a233cfb48b54b4fb1972202d6440e4823601de WatchSource:0}: Error finding container 8e27f5f43701323c4c809be210a233cfb48b54b4fb1972202d6440e4823601de: Status 404 returned error can't find the container with id 8e27f5f43701323c4c809be210a233cfb48b54b4fb1972202d6440e4823601de Nov 26 08:50:16 crc kubenswrapper[4940]: I1126 08:50:16.087430 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xktpm"] Nov 26 08:50:16 crc kubenswrapper[4940]: I1126 08:50:16.165273 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:50:16 crc kubenswrapper[4940]: E1126 08:50:16.165505 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:50:16 crc kubenswrapper[4940]: I1126 08:50:16.448882 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-xktpm" event={"ID":"8db993ee-766e-4c04-a4a3-8e6d1051101d","Type":"ContainerStarted","Data":"8e27f5f43701323c4c809be210a233cfb48b54b4fb1972202d6440e4823601de"} Nov 26 08:50:25 crc kubenswrapper[4940]: I1126 08:50:25.557033 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-xktpm" event={"ID":"8db993ee-766e-4c04-a4a3-8e6d1051101d","Type":"ContainerStarted","Data":"45cc08322947d7ec2e089f5fa0574744d38b51fc39376e740de93a78a1c6a311"} Nov 26 08:50:25 crc kubenswrapper[4940]: I1126 08:50:25.579830 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-xktpm" podStartSLOduration=1.7768617949999999 podStartE2EDuration="10.579805694s" podCreationTimestamp="2025-11-26 08:50:15 +0000 UTC" firstStartedPulling="2025-11-26 08:50:16.088883081 +0000 UTC m=+6917.609024700" lastFinishedPulling="2025-11-26 08:50:24.89182698 +0000 UTC m=+6926.411968599" observedRunningTime="2025-11-26 08:50:25.57402524 +0000 UTC m=+6927.094166859" watchObservedRunningTime="2025-11-26 08:50:25.579805694 +0000 UTC m=+6927.099947303" Nov 26 08:50:27 crc kubenswrapper[4940]: I1126 08:50:27.166162 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:50:27 crc kubenswrapper[4940]: E1126 08:50:27.166703 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:50:30 crc kubenswrapper[4940]: I1126 08:50:30.615790 4940 generic.go:334] "Generic (PLEG): container finished" 
podID="8db993ee-766e-4c04-a4a3-8e6d1051101d" containerID="45cc08322947d7ec2e089f5fa0574744d38b51fc39376e740de93a78a1c6a311" exitCode=0 Nov 26 08:50:30 crc kubenswrapper[4940]: I1126 08:50:30.615881 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-xktpm" event={"ID":"8db993ee-766e-4c04-a4a3-8e6d1051101d","Type":"ContainerDied","Data":"45cc08322947d7ec2e089f5fa0574744d38b51fc39376e740de93a78a1c6a311"} Nov 26 08:50:31 crc kubenswrapper[4940]: I1126 08:50:31.901191 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:31.999467 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-config-data\") pod \"8db993ee-766e-4c04-a4a3-8e6d1051101d\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:31.999851 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69sgx\" (UniqueName: \"kubernetes.io/projected/8db993ee-766e-4c04-a4a3-8e6d1051101d-kube-api-access-69sgx\") pod \"8db993ee-766e-4c04-a4a3-8e6d1051101d\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.000310 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-scripts\") pod \"8db993ee-766e-4c04-a4a3-8e6d1051101d\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.000345 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-combined-ca-bundle\") pod \"8db993ee-766e-4c04-a4a3-8e6d1051101d\" (UID: \"8db993ee-766e-4c04-a4a3-8e6d1051101d\") " Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.004328 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8db993ee-766e-4c04-a4a3-8e6d1051101d-kube-api-access-69sgx" (OuterVolumeSpecName: "kube-api-access-69sgx") pod "8db993ee-766e-4c04-a4a3-8e6d1051101d" (UID: "8db993ee-766e-4c04-a4a3-8e6d1051101d"). InnerVolumeSpecName "kube-api-access-69sgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.005226 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-scripts" (OuterVolumeSpecName: "scripts") pod "8db993ee-766e-4c04-a4a3-8e6d1051101d" (UID: "8db993ee-766e-4c04-a4a3-8e6d1051101d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.025920 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-config-data" (OuterVolumeSpecName: "config-data") pod "8db993ee-766e-4c04-a4a3-8e6d1051101d" (UID: "8db993ee-766e-4c04-a4a3-8e6d1051101d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.047396 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8db993ee-766e-4c04-a4a3-8e6d1051101d" (UID: "8db993ee-766e-4c04-a4a3-8e6d1051101d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.104204 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.104241 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.104255 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8db993ee-766e-4c04-a4a3-8e6d1051101d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.104266 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69sgx\" (UniqueName: \"kubernetes.io/projected/8db993ee-766e-4c04-a4a3-8e6d1051101d-kube-api-access-69sgx\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.634357 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-xktpm" event={"ID":"8db993ee-766e-4c04-a4a3-8e6d1051101d","Type":"ContainerDied","Data":"8e27f5f43701323c4c809be210a233cfb48b54b4fb1972202d6440e4823601de"} Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.634404 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e27f5f43701323c4c809be210a233cfb48b54b4fb1972202d6440e4823601de" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.634502 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-xktpm" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.829420 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 08:50:32 crc kubenswrapper[4940]: E1126 08:50:32.829849 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8db993ee-766e-4c04-a4a3-8e6d1051101d" containerName="nova-cell0-conductor-db-sync" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.829868 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8db993ee-766e-4c04-a4a3-8e6d1051101d" containerName="nova-cell0-conductor-db-sync" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.830105 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8db993ee-766e-4c04-a4a3-8e6d1051101d" containerName="nova-cell0-conductor-db-sync" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.830828 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.832956 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.835348 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-2kkqf" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.860460 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.918547 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.918641 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:32 crc kubenswrapper[4940]: I1126 08:50:32.918753 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5wgq\" (UniqueName: \"kubernetes.io/projected/ccdda4af-b9bc-4225-9310-b54709e7ee09-kube-api-access-j5wgq\") pod \"nova-cell0-conductor-0\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.021110 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5wgq\" (UniqueName: \"kubernetes.io/projected/ccdda4af-b9bc-4225-9310-b54709e7ee09-kube-api-access-j5wgq\") pod \"nova-cell0-conductor-0\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.021219 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.021273 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.033412 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.039496 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.046642 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5wgq\" (UniqueName: \"kubernetes.io/projected/ccdda4af-b9bc-4225-9310-b54709e7ee09-kube-api-access-j5wgq\") pod \"nova-cell0-conductor-0\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.151688 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.417431 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 08:50:33 crc kubenswrapper[4940]: W1126 08:50:33.423468 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podccdda4af_b9bc_4225_9310_b54709e7ee09.slice/crio-671aaecf499d674dd1ac560f38d14fc90f49dc15a01d67479fd56ba59bb4bb8f WatchSource:0}: Error finding container 671aaecf499d674dd1ac560f38d14fc90f49dc15a01d67479fd56ba59bb4bb8f: Status 404 returned error can't find the container with id 671aaecf499d674dd1ac560f38d14fc90f49dc15a01d67479fd56ba59bb4bb8f Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.645739 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"ccdda4af-b9bc-4225-9310-b54709e7ee09","Type":"ContainerStarted","Data":"96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02"} Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.646065 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"ccdda4af-b9bc-4225-9310-b54709e7ee09","Type":"ContainerStarted","Data":"671aaecf499d674dd1ac560f38d14fc90f49dc15a01d67479fd56ba59bb4bb8f"} Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.646116 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:33 crc kubenswrapper[4940]: I1126 08:50:33.669637 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.6696207090000001 podStartE2EDuration="1.669620709s" podCreationTimestamp="2025-11-26 08:50:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:33.664575739 +0000 UTC m=+6935.184717358" watchObservedRunningTime="2025-11-26 08:50:33.669620709 +0000 UTC m=+6935.189762328" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.193569 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.669479 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-s8s4r"] Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.670877 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.674625 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.680844 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-s8s4r"] Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.688586 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.751005 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-scripts\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.751114 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-config-data\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.751168 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm6sr\" (UniqueName: \"kubernetes.io/projected/679f64f7-3f0a-4b6f-800a-11aa64c61f29-kube-api-access-bm6sr\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.751209 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.854660 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-scripts\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.855091 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-config-data\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.855271 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm6sr\" (UniqueName: \"kubernetes.io/projected/679f64f7-3f0a-4b6f-800a-11aa64c61f29-kube-api-access-bm6sr\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.855425 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.865079 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-config-data\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.865425 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.867258 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.867516 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-scripts\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.869486 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.875303 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.877144 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.890137 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm6sr\" (UniqueName: \"kubernetes.io/projected/679f64f7-3f0a-4b6f-800a-11aa64c61f29-kube-api-access-bm6sr\") pod \"nova-cell0-cell-mapping-s8s4r\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.906171 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.907255 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.911599 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.919126 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.957063 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-logs\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.958634 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-config-data\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.958879 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5b2q\" (UniqueName: \"kubernetes.io/projected/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-kube-api-access-j5b2q\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:38 crc kubenswrapper[4940]: I1126 08:50:38.959128 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.001203 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.040408 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.041838 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.046371 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.060473 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.060537 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-logs\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.060581 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt6dm\" (UniqueName: \"kubernetes.io/projected/aa1d555a-b48e-4e73-9364-8a0433712c0e-kube-api-access-qt6dm\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.060644 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.060674 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-config-data\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.060694 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-config-data\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.060751 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5b2q\" (UniqueName: \"kubernetes.io/projected/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-kube-api-access-j5b2q\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.066411 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.067569 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-config-data\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.068845 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-logs\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.069013 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.112701 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5b2q\" (UniqueName: \"kubernetes.io/projected/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-kube-api-access-j5b2q\") pod \"nova-api-0\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.137891 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.141013 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.145444 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.162824 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt6dm\" (UniqueName: \"kubernetes.io/projected/aa1d555a-b48e-4e73-9364-8a0433712c0e-kube-api-access-qt6dm\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.162889 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-config-data\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.162912 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.162934 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-config-data\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.162971 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47400aad-df54-4c42-93ea-2af6b06168c9-logs\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.163005 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x5ck\" (UniqueName: \"kubernetes.io/projected/47400aad-df54-4c42-93ea-2af6b06168c9-kube-api-access-7x5ck\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc 
kubenswrapper[4940]: I1126 08:50:39.163028 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.167740 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-config-data\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.175424 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.184141 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt6dm\" (UniqueName: \"kubernetes.io/projected/aa1d555a-b48e-4e73-9364-8a0433712c0e-kube-api-access-qt6dm\") pod \"nova-scheduler-0\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.214075 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.240984 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f4579ddb7-7k4n2"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.242388 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.261729 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f4579ddb7-7k4n2"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.264900 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47400aad-df54-4c42-93ea-2af6b06168c9-logs\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.264935 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9f4v\" (UniqueName: \"kubernetes.io/projected/84b7d511-58d4-46ef-af52-4dc808018a92-kube-api-access-s9f4v\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.265030 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x5ck\" (UniqueName: \"kubernetes.io/projected/47400aad-df54-4c42-93ea-2af6b06168c9-kube-api-access-7x5ck\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.265080 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.265124 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.265229 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.265254 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-config-data\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.266712 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47400aad-df54-4c42-93ea-2af6b06168c9-logs\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.276092 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " 
pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.289394 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-config-data\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.292499 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x5ck\" (UniqueName: \"kubernetes.io/projected/47400aad-df54-4c42-93ea-2af6b06168c9-kube-api-access-7x5ck\") pod \"nova-metadata-0\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.302813 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.314257 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.372184 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbll4\" (UniqueName: \"kubernetes.io/projected/83232608-2b13-4451-a72b-5638f3d2d55a-kube-api-access-rbll4\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.372660 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-nb\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.372723 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.372820 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9f4v\" (UniqueName: \"kubernetes.io/projected/84b7d511-58d4-46ef-af52-4dc808018a92-kube-api-access-s9f4v\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.376232 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-sb\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.376365 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.376950 4940 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-config\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.376983 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-dns-svc\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.383739 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.386898 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.397513 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9f4v\" (UniqueName: \"kubernetes.io/projected/84b7d511-58d4-46ef-af52-4dc808018a92-kube-api-access-s9f4v\") pod \"nova-cell1-novncproxy-0\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.463246 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.479636 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-sb\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.479698 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-config\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.479717 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-dns-svc\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.479760 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbll4\" (UniqueName: \"kubernetes.io/projected/83232608-2b13-4451-a72b-5638f3d2d55a-kube-api-access-rbll4\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.479805 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-nb\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.480554 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-config\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.480586 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-nb\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.481106 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-dns-svc\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.481684 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-sb\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.484962 4940 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.517583 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbll4\" (UniqueName: \"kubernetes.io/projected/83232608-2b13-4451-a72b-5638f3d2d55a-kube-api-access-rbll4\") pod \"dnsmasq-dns-7f4579ddb7-7k4n2\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.569456 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.655636 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-s8s4r"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.746258 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-s8s4r" event={"ID":"679f64f7-3f0a-4b6f-800a-11aa64c61f29","Type":"ContainerStarted","Data":"d3fefd060dcd146f265144247d865efb8b1521ff3cfff50c6e020389711586af"} Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.797104 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rj66c"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.798339 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.803120 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.805439 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.811279 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rj66c"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.826140 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:39 crc kubenswrapper[4940]: W1126 08:50:39.836640 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa1d555a_b48e_4e73_9364_8a0433712c0e.slice/crio-391b51f6ffa31c02ec7ecb5dea026ba66cfd387b9f946be02e299972ea5357e8 WatchSource:0}: Error finding container 391b51f6ffa31c02ec7ecb5dea026ba66cfd387b9f946be02e299972ea5357e8: Status 404 returned error can't find the container with id 391b51f6ffa31c02ec7ecb5dea026ba66cfd387b9f946be02e299972ea5357e8 Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.895643 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-scripts\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.895911 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-config-data\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" 
Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.896124 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt2m9\" (UniqueName: \"kubernetes.io/projected/157792b6-aead-4358-82d4-c7ba3a6661e2-kube-api-access-zt2m9\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.896241 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.925200 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.997552 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.998013 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-scripts\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.998178 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-config-data\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:39 crc kubenswrapper[4940]: I1126 08:50:39.998259 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zt2m9\" (UniqueName: \"kubernetes.io/projected/157792b6-aead-4358-82d4-c7ba3a6661e2-kube-api-access-zt2m9\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.002430 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-scripts\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.002839 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.013130 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-config-data\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.015689 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt2m9\" (UniqueName: \"kubernetes.io/projected/157792b6-aead-4358-82d4-c7ba3a6661e2-kube-api-access-zt2m9\") pod \"nova-cell1-conductor-db-sync-rj66c\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.082212 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.089619 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:50:40 crc kubenswrapper[4940]: W1126 08:50:40.102158 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84b7d511_58d4_46ef_af52_4dc808018a92.slice/crio-eeb54f9ecc8471316aa2d5c811d7087615ae6bb4e2036956505e8f7b5ac40213 WatchSource:0}: Error finding container eeb54f9ecc8471316aa2d5c811d7087615ae6bb4e2036956505e8f7b5ac40213: Status 404 returned error can't find the container with id eeb54f9ecc8471316aa2d5c811d7087615ae6bb4e2036956505e8f7b5ac40213 Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.118887 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.214238 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f4579ddb7-7k4n2"] Nov 26 08:50:40 crc kubenswrapper[4940]: W1126 08:50:40.220357 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83232608_2b13_4451_a72b_5638f3d2d55a.slice/crio-afeee777c04e53ad29f26b21b5c70103fa14a296b4d62ce52f347cbcaca718dc WatchSource:0}: Error finding container afeee777c04e53ad29f26b21b5c70103fa14a296b4d62ce52f347cbcaca718dc: Status 404 returned error can't find the container with id afeee777c04e53ad29f26b21b5c70103fa14a296b4d62ce52f347cbcaca718dc Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.589569 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rj66c"] Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.780749 4940 generic.go:334] "Generic (PLEG): container finished" podID="83232608-2b13-4451-a72b-5638f3d2d55a" containerID="30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75" exitCode=0 Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.780875 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" event={"ID":"83232608-2b13-4451-a72b-5638f3d2d55a","Type":"ContainerDied","Data":"30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75"} Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.780910 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" event={"ID":"83232608-2b13-4451-a72b-5638f3d2d55a","Type":"ContainerStarted","Data":"afeee777c04e53ad29f26b21b5c70103fa14a296b4d62ce52f347cbcaca718dc"} Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.785029 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-novncproxy-0" event={"ID":"84b7d511-58d4-46ef-af52-4dc808018a92","Type":"ContainerStarted","Data":"eeb54f9ecc8471316aa2d5c811d7087615ae6bb4e2036956505e8f7b5ac40213"} Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.786878 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"aa1d555a-b48e-4e73-9364-8a0433712c0e","Type":"ContainerStarted","Data":"391b51f6ffa31c02ec7ecb5dea026ba66cfd387b9f946be02e299972ea5357e8"} Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.789502 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c","Type":"ContainerStarted","Data":"0871d24a63c376f6c3efab27470c03bf013e806ba3a8f8ef8c8e94d084c9f35c"} Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.793246 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-s8s4r" event={"ID":"679f64f7-3f0a-4b6f-800a-11aa64c61f29","Type":"ContainerStarted","Data":"011384c1edf223a9d805308d673107fb725b8596a0e79bf23284a900aeb7908f"} Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.796210 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47400aad-df54-4c42-93ea-2af6b06168c9","Type":"ContainerStarted","Data":"0ac40d4bde05ad44e774f157b5a88f540ee5d3bfe8a5946ae5a907ce37e1893c"} Nov 26 08:50:40 crc kubenswrapper[4940]: I1126 08:50:40.826142 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-s8s4r" podStartSLOduration=2.826106452 podStartE2EDuration="2.826106452s" podCreationTimestamp="2025-11-26 08:50:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:40.816691092 +0000 UTC m=+6942.336832721" watchObservedRunningTime="2025-11-26 08:50:40.826106452 +0000 UTC m=+6942.346248111" Nov 26 08:50:41 crc kubenswrapper[4940]: I1126 08:50:41.808186 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rj66c" event={"ID":"157792b6-aead-4358-82d4-c7ba3a6661e2","Type":"ContainerStarted","Data":"5f6b1367d01896406fa84e38d778acb67e138667541797b3effdd42cd1fa07cf"} Nov 26 08:50:42 crc kubenswrapper[4940]: I1126 08:50:42.168009 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:50:42 crc kubenswrapper[4940]: E1126 08:50:42.168310 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.832785 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rj66c" event={"ID":"157792b6-aead-4358-82d4-c7ba3a6661e2","Type":"ContainerStarted","Data":"8d8dd3f9a61115d7b0aaca1f6674482246b64b220cbef8fde0c2384c0e854e94"} Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.836827 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c","Type":"ContainerStarted","Data":"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93"} Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.836878 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c","Type":"ContainerStarted","Data":"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5"} Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.840938 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47400aad-df54-4c42-93ea-2af6b06168c9","Type":"ContainerStarted","Data":"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a"} Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.840986 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47400aad-df54-4c42-93ea-2af6b06168c9","Type":"ContainerStarted","Data":"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34"} Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.843902 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" event={"ID":"83232608-2b13-4451-a72b-5638f3d2d55a","Type":"ContainerStarted","Data":"e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b"} Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.845099 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.846677 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"84b7d511-58d4-46ef-af52-4dc808018a92","Type":"ContainerStarted","Data":"99e0a0c536159ce4f1407628eab4e22fda6bca1599755a9aeeae19b51556db79"} Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.848926 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"aa1d555a-b48e-4e73-9364-8a0433712c0e","Type":"ContainerStarted","Data":"0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae"} Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.873413 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-rj66c" podStartSLOduration=4.873389216 podStartE2EDuration="4.873389216s" podCreationTimestamp="2025-11-26 08:50:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:43.863357757 +0000 UTC m=+6945.383499396" watchObservedRunningTime="2025-11-26 08:50:43.873389216 +0000 UTC m=+6945.393530855" Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.888803 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" podStartSLOduration=4.888780806 podStartE2EDuration="4.888780806s" podCreationTimestamp="2025-11-26 08:50:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:43.885970666 +0000 UTC m=+6945.406112295" watchObservedRunningTime="2025-11-26 08:50:43.888780806 +0000 UTC m=+6945.408922435" Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.923542 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.444831862 podStartE2EDuration="4.923513089s" 
podCreationTimestamp="2025-11-26 08:50:39 +0000 UTC" firstStartedPulling="2025-11-26 08:50:40.090674119 +0000 UTC m=+6941.610815738" lastFinishedPulling="2025-11-26 08:50:42.569355346 +0000 UTC m=+6944.089496965" observedRunningTime="2025-11-26 08:50:43.907574063 +0000 UTC m=+6945.427715692" watchObservedRunningTime="2025-11-26 08:50:43.923513089 +0000 UTC m=+6945.443654748" Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.947304 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.223765265 podStartE2EDuration="5.947284746s" podCreationTimestamp="2025-11-26 08:50:38 +0000 UTC" firstStartedPulling="2025-11-26 08:50:39.839191653 +0000 UTC m=+6941.359333272" lastFinishedPulling="2025-11-26 08:50:42.562711134 +0000 UTC m=+6944.082852753" observedRunningTime="2025-11-26 08:50:43.93517648 +0000 UTC m=+6945.455318119" watchObservedRunningTime="2025-11-26 08:50:43.947284746 +0000 UTC m=+6945.467426375" Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.964900 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.321212722 podStartE2EDuration="5.964876104s" podCreationTimestamp="2025-11-26 08:50:38 +0000 UTC" firstStartedPulling="2025-11-26 08:50:39.925115835 +0000 UTC m=+6941.445257444" lastFinishedPulling="2025-11-26 08:50:42.568779207 +0000 UTC m=+6944.088920826" observedRunningTime="2025-11-26 08:50:43.953548575 +0000 UTC m=+6945.473690204" watchObservedRunningTime="2025-11-26 08:50:43.964876104 +0000 UTC m=+6945.485017723" Nov 26 08:50:43 crc kubenswrapper[4940]: I1126 08:50:43.975113 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.516859903 podStartE2EDuration="4.97509353s" podCreationTimestamp="2025-11-26 08:50:39 +0000 UTC" firstStartedPulling="2025-11-26 08:50:40.10990837 +0000 UTC m=+6941.630049989" lastFinishedPulling="2025-11-26 08:50:42.568141997 +0000 UTC m=+6944.088283616" observedRunningTime="2025-11-26 08:50:43.969992598 +0000 UTC m=+6945.490134237" watchObservedRunningTime="2025-11-26 08:50:43.97509353 +0000 UTC m=+6945.495235159" Nov 26 08:50:44 crc kubenswrapper[4940]: I1126 08:50:44.314910 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 08:50:44 crc kubenswrapper[4940]: I1126 08:50:44.463839 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 08:50:44 crc kubenswrapper[4940]: I1126 08:50:44.463899 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 08:50:44 crc kubenswrapper[4940]: I1126 08:50:44.496077 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:44 crc kubenswrapper[4940]: I1126 08:50:44.858609 4940 generic.go:334] "Generic (PLEG): container finished" podID="679f64f7-3f0a-4b6f-800a-11aa64c61f29" containerID="011384c1edf223a9d805308d673107fb725b8596a0e79bf23284a900aeb7908f" exitCode=0 Nov 26 08:50:44 crc kubenswrapper[4940]: I1126 08:50:44.858658 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-s8s4r" event={"ID":"679f64f7-3f0a-4b6f-800a-11aa64c61f29","Type":"ContainerDied","Data":"011384c1edf223a9d805308d673107fb725b8596a0e79bf23284a900aeb7908f"} Nov 26 08:50:45 crc kubenswrapper[4940]: I1126 08:50:45.868176 4940 generic.go:334] "Generic (PLEG): 
container finished" podID="157792b6-aead-4358-82d4-c7ba3a6661e2" containerID="8d8dd3f9a61115d7b0aaca1f6674482246b64b220cbef8fde0c2384c0e854e94" exitCode=0 Nov 26 08:50:45 crc kubenswrapper[4940]: I1126 08:50:45.868280 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rj66c" event={"ID":"157792b6-aead-4358-82d4-c7ba3a6661e2","Type":"ContainerDied","Data":"8d8dd3f9a61115d7b0aaca1f6674482246b64b220cbef8fde0c2384c0e854e94"} Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.223337 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.351287 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-scripts\") pod \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.351430 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-config-data\") pod \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.351495 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-combined-ca-bundle\") pod \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.351577 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bm6sr\" (UniqueName: \"kubernetes.io/projected/679f64f7-3f0a-4b6f-800a-11aa64c61f29-kube-api-access-bm6sr\") pod \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\" (UID: \"679f64f7-3f0a-4b6f-800a-11aa64c61f29\") " Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.356202 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-scripts" (OuterVolumeSpecName: "scripts") pod "679f64f7-3f0a-4b6f-800a-11aa64c61f29" (UID: "679f64f7-3f0a-4b6f-800a-11aa64c61f29"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.356323 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/679f64f7-3f0a-4b6f-800a-11aa64c61f29-kube-api-access-bm6sr" (OuterVolumeSpecName: "kube-api-access-bm6sr") pod "679f64f7-3f0a-4b6f-800a-11aa64c61f29" (UID: "679f64f7-3f0a-4b6f-800a-11aa64c61f29"). InnerVolumeSpecName "kube-api-access-bm6sr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.375827 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-config-data" (OuterVolumeSpecName: "config-data") pod "679f64f7-3f0a-4b6f-800a-11aa64c61f29" (UID: "679f64f7-3f0a-4b6f-800a-11aa64c61f29"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.393869 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "679f64f7-3f0a-4b6f-800a-11aa64c61f29" (UID: "679f64f7-3f0a-4b6f-800a-11aa64c61f29"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.454267 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.454309 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.454321 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bm6sr\" (UniqueName: \"kubernetes.io/projected/679f64f7-3f0a-4b6f-800a-11aa64c61f29-kube-api-access-bm6sr\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.454331 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/679f64f7-3f0a-4b6f-800a-11aa64c61f29-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.882933 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-s8s4r" event={"ID":"679f64f7-3f0a-4b6f-800a-11aa64c61f29","Type":"ContainerDied","Data":"d3fefd060dcd146f265144247d865efb8b1521ff3cfff50c6e020389711586af"} Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.883002 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3fefd060dcd146f265144247d865efb8b1521ff3cfff50c6e020389711586af" Nov 26 08:50:46 crc kubenswrapper[4940]: I1126 08:50:46.883012 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-s8s4r" Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.093757 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod679f64f7_3f0a_4b6f_800a_11aa64c61f29.slice/crio-d3fefd060dcd146f265144247d865efb8b1521ff3cfff50c6e020389711586af\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod679f64f7_3f0a_4b6f_800a_11aa64c61f29.slice\": RecentStats: unable to find data in memory cache]" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.138651 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.138862 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerName="nova-api-log" containerID="cri-o://2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5" gracePeriod=30 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.139298 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerName="nova-api-api" containerID="cri-o://bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93" gracePeriod=30 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.232699 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.232757 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.232972 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" containerName="nova-metadata-log" containerID="cri-o://25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34" gracePeriod=30 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.233234 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="aa1d555a-b48e-4e73-9364-8a0433712c0e" containerName="nova-scheduler-scheduler" containerID="cri-o://0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae" gracePeriod=30 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.233572 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" containerName="nova-metadata-metadata" containerID="cri-o://db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a" gracePeriod=30 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.398716 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.479168 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-config-data\") pod \"157792b6-aead-4358-82d4-c7ba3a6661e2\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.479319 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-scripts\") pod \"157792b6-aead-4358-82d4-c7ba3a6661e2\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.479386 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zt2m9\" (UniqueName: \"kubernetes.io/projected/157792b6-aead-4358-82d4-c7ba3a6661e2-kube-api-access-zt2m9\") pod \"157792b6-aead-4358-82d4-c7ba3a6661e2\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.479429 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-combined-ca-bundle\") pod \"157792b6-aead-4358-82d4-c7ba3a6661e2\" (UID: \"157792b6-aead-4358-82d4-c7ba3a6661e2\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.495563 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/157792b6-aead-4358-82d4-c7ba3a6661e2-kube-api-access-zt2m9" (OuterVolumeSpecName: "kube-api-access-zt2m9") pod "157792b6-aead-4358-82d4-c7ba3a6661e2" (UID: "157792b6-aead-4358-82d4-c7ba3a6661e2"). InnerVolumeSpecName "kube-api-access-zt2m9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.499238 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-scripts" (OuterVolumeSpecName: "scripts") pod "157792b6-aead-4358-82d4-c7ba3a6661e2" (UID: "157792b6-aead-4358-82d4-c7ba3a6661e2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.544181 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "157792b6-aead-4358-82d4-c7ba3a6661e2" (UID: "157792b6-aead-4358-82d4-c7ba3a6661e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.551302 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-config-data" (OuterVolumeSpecName: "config-data") pod "157792b6-aead-4358-82d4-c7ba3a6661e2" (UID: "157792b6-aead-4358-82d4-c7ba3a6661e2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.581270 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.581299 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zt2m9\" (UniqueName: \"kubernetes.io/projected/157792b6-aead-4358-82d4-c7ba3a6661e2-kube-api-access-zt2m9\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.581309 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.581320 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/157792b6-aead-4358-82d4-c7ba3a6661e2-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.801253 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.883519 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.886956 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-combined-ca-bundle\") pod \"47400aad-df54-4c42-93ea-2af6b06168c9\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.887020 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7x5ck\" (UniqueName: \"kubernetes.io/projected/47400aad-df54-4c42-93ea-2af6b06168c9-kube-api-access-7x5ck\") pod \"47400aad-df54-4c42-93ea-2af6b06168c9\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.887165 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47400aad-df54-4c42-93ea-2af6b06168c9-logs\") pod \"47400aad-df54-4c42-93ea-2af6b06168c9\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.887267 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-config-data\") pod \"47400aad-df54-4c42-93ea-2af6b06168c9\" (UID: \"47400aad-df54-4c42-93ea-2af6b06168c9\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.887527 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47400aad-df54-4c42-93ea-2af6b06168c9-logs" (OuterVolumeSpecName: "logs") pod "47400aad-df54-4c42-93ea-2af6b06168c9" (UID: "47400aad-df54-4c42-93ea-2af6b06168c9"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.887818 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47400aad-df54-4c42-93ea-2af6b06168c9-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.890583 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47400aad-df54-4c42-93ea-2af6b06168c9-kube-api-access-7x5ck" (OuterVolumeSpecName: "kube-api-access-7x5ck") pod "47400aad-df54-4c42-93ea-2af6b06168c9" (UID: "47400aad-df54-4c42-93ea-2af6b06168c9"). InnerVolumeSpecName "kube-api-access-7x5ck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.891478 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rj66c" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.891494 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rj66c" event={"ID":"157792b6-aead-4358-82d4-c7ba3a6661e2","Type":"ContainerDied","Data":"5f6b1367d01896406fa84e38d778acb67e138667541797b3effdd42cd1fa07cf"} Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.891561 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f6b1367d01896406fa84e38d778acb67e138667541797b3effdd42cd1fa07cf" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.893657 4940 generic.go:334] "Generic (PLEG): container finished" podID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerID="bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93" exitCode=0 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.893692 4940 generic.go:334] "Generic (PLEG): container finished" podID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerID="2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5" exitCode=143 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.893755 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.893756 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c","Type":"ContainerDied","Data":"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93"} Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.893901 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c","Type":"ContainerDied","Data":"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5"} Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.893958 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c","Type":"ContainerDied","Data":"0871d24a63c376f6c3efab27470c03bf013e806ba3a8f8ef8c8e94d084c9f35c"} Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.893984 4940 scope.go:117] "RemoveContainer" containerID="bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.895757 4940 generic.go:334] "Generic (PLEG): container finished" podID="47400aad-df54-4c42-93ea-2af6b06168c9" containerID="db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a" exitCode=0 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.895779 4940 generic.go:334] "Generic (PLEG): container finished" podID="47400aad-df54-4c42-93ea-2af6b06168c9" containerID="25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34" exitCode=143 Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.895797 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47400aad-df54-4c42-93ea-2af6b06168c9","Type":"ContainerDied","Data":"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a"} Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.895820 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47400aad-df54-4c42-93ea-2af6b06168c9","Type":"ContainerDied","Data":"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34"} Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.895831 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47400aad-df54-4c42-93ea-2af6b06168c9","Type":"ContainerDied","Data":"0ac40d4bde05ad44e774f157b5a88f540ee5d3bfe8a5946ae5a907ce37e1893c"} Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.895835 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.915908 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47400aad-df54-4c42-93ea-2af6b06168c9" (UID: "47400aad-df54-4c42-93ea-2af6b06168c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.948915 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-config-data" (OuterVolumeSpecName: "config-data") pod "47400aad-df54-4c42-93ea-2af6b06168c9" (UID: "47400aad-df54-4c42-93ea-2af6b06168c9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.951705 4940 scope.go:117] "RemoveContainer" containerID="2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.978755 4940 scope.go:117] "RemoveContainer" containerID="bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93" Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.979314 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93\": container with ID starting with bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93 not found: ID does not exist" containerID="bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.979406 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93"} err="failed to get container status \"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93\": rpc error: code = NotFound desc = could not find container \"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93\": container with ID starting with bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93 not found: ID does not exist" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.979440 4940 scope.go:117] "RemoveContainer" containerID="2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5" Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.981212 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5\": container with ID starting with 2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5 not found: ID does not exist" containerID="2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.981238 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5"} err="failed to get container status \"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5\": rpc error: code = NotFound desc = could not find container \"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5\": container with ID starting with 2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5 not found: ID does not exist" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.981254 4940 scope.go:117] "RemoveContainer" containerID="bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.981447 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93"} err="failed to get container status \"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93\": rpc error: code = NotFound desc = could not find container \"bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93\": container with ID starting with bdacf2aa4a95628b91e5a0f593271e80b42018f20207f345d2c23589a4b85e93 not found: ID does not exist" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.981470 4940 
scope.go:117] "RemoveContainer" containerID="2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.981856 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5"} err="failed to get container status \"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5\": rpc error: code = NotFound desc = could not find container \"2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5\": container with ID starting with 2d151f470a59e728779e6b4dc1f7fc9cdb97bf64af0c427628c3dfa6521584a5 not found: ID does not exist" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.981886 4940 scope.go:117] "RemoveContainer" containerID="db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987180 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.987590 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerName="nova-api-log" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987611 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerName="nova-api-log" Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.987626 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="157792b6-aead-4358-82d4-c7ba3a6661e2" containerName="nova-cell1-conductor-db-sync" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987633 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="157792b6-aead-4358-82d4-c7ba3a6661e2" containerName="nova-cell1-conductor-db-sync" Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.987645 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" containerName="nova-metadata-log" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987651 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" containerName="nova-metadata-log" Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.987671 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679f64f7-3f0a-4b6f-800a-11aa64c61f29" containerName="nova-manage" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987677 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="679f64f7-3f0a-4b6f-800a-11aa64c61f29" containerName="nova-manage" Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.987686 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerName="nova-api-api" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987691 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerName="nova-api-api" Nov 26 08:50:47 crc kubenswrapper[4940]: E1126 08:50:47.987714 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" containerName="nova-metadata-metadata" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987720 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" containerName="nova-metadata-metadata" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987875 4940 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="157792b6-aead-4358-82d4-c7ba3a6661e2" containerName="nova-cell1-conductor-db-sync" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987885 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerName="nova-api-log" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987896 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" containerName="nova-metadata-metadata" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987906 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" containerName="nova-api-api" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987915 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="679f64f7-3f0a-4b6f-800a-11aa64c61f29" containerName="nova-manage" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.987930 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" containerName="nova-metadata-log" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.988393 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5b2q\" (UniqueName: \"kubernetes.io/projected/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-kube-api-access-j5b2q\") pod \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.988562 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.988582 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-logs\") pod \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.988630 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-config-data\") pod \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.988664 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-combined-ca-bundle\") pod \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\" (UID: \"2cd1d1a1-ed28-4409-b1d3-80a899f96e7c\") " Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.989299 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-logs" (OuterVolumeSpecName: "logs") pod "2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" (UID: "2cd1d1a1-ed28-4409-b1d3-80a899f96e7c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.989337 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.989352 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47400aad-df54-4c42-93ea-2af6b06168c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.989362 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7x5ck\" (UniqueName: \"kubernetes.io/projected/47400aad-df54-4c42-93ea-2af6b06168c9-kube-api-access-7x5ck\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.990444 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.997585 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-kube-api-access-j5b2q" (OuterVolumeSpecName: "kube-api-access-j5b2q") pod "2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" (UID: "2cd1d1a1-ed28-4409-b1d3-80a899f96e7c"). InnerVolumeSpecName "kube-api-access-j5b2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:47 crc kubenswrapper[4940]: I1126 08:50:47.998686 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.016028 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-config-data" (OuterVolumeSpecName: "config-data") pod "2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" (UID: "2cd1d1a1-ed28-4409-b1d3-80a899f96e7c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.019610 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" (UID: "2cd1d1a1-ed28-4409-b1d3-80a899f96e7c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.027346 4940 scope.go:117] "RemoveContainer" containerID="25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.043913 4940 scope.go:117] "RemoveContainer" containerID="db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a" Nov 26 08:50:48 crc kubenswrapper[4940]: E1126 08:50:48.044312 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a\": container with ID starting with db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a not found: ID does not exist" containerID="db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.044344 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a"} err="failed to get container status \"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a\": rpc error: code = NotFound desc = could not find container \"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a\": container with ID starting with db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a not found: ID does not exist" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.044368 4940 scope.go:117] "RemoveContainer" containerID="25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34" Nov 26 08:50:48 crc kubenswrapper[4940]: E1126 08:50:48.044593 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34\": container with ID starting with 25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34 not found: ID does not exist" containerID="25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.044639 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34"} err="failed to get container status \"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34\": rpc error: code = NotFound desc = could not find container \"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34\": container with ID starting with 25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34 not found: ID does not exist" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.044669 4940 scope.go:117] "RemoveContainer" containerID="db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.045328 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a"} err="failed to get container status \"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a\": rpc error: code = NotFound desc = could not find container \"db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a\": container with ID starting with db3889fb617927516fab087bbd1c16897842f81b88adcc186f7c27f5559a5d7a not found: ID does not exist" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.045354 4940 
scope.go:117] "RemoveContainer" containerID="25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.045825 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34"} err="failed to get container status \"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34\": rpc error: code = NotFound desc = could not find container \"25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34\": container with ID starting with 25d811b028e897be2a482f976857aca8d0c2f2dbf3017bb2e23a2bff30117a34 not found: ID does not exist" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.091148 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.091261 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swm7x\" (UniqueName: \"kubernetes.io/projected/ad7344d8-55c3-4025-8cba-b5919e8d42a8-kube-api-access-swm7x\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.091289 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.091387 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.091398 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.091407 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.091417 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5b2q\" (UniqueName: \"kubernetes.io/projected/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c-kube-api-access-j5b2q\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.193314 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.193392 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swm7x\" (UniqueName: 
\"kubernetes.io/projected/ad7344d8-55c3-4025-8cba-b5919e8d42a8-kube-api-access-swm7x\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.193428 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.196749 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.207221 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.210949 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swm7x\" (UniqueName: \"kubernetes.io/projected/ad7344d8-55c3-4025-8cba-b5919e8d42a8-kube-api-access-swm7x\") pod \"nova-cell1-conductor-0\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") " pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.257848 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.269725 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.284260 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.297492 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.313743 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.315353 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.317385 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.325142 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.329378 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.335086 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.336669 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.339006 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.379372 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.396368 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.396447 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwtpw\" (UniqueName: \"kubernetes.io/projected/77ceabba-3a0d-417c-ae23-37f6800fa1ff-kube-api-access-wwtpw\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.396485 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ceabba-3a0d-417c-ae23-37f6800fa1ff-logs\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.396594 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-config-data\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.396641 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-config-data\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.396669 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.396686 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c47wh\" (UniqueName: \"kubernetes.io/projected/8895109f-09f0-46b6-92af-ea6f0d61a836-kube-api-access-c47wh\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.396705 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8895109f-09f0-46b6-92af-ea6f0d61a836-logs\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.458720 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.497883 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-config-data\") pod \"aa1d555a-b48e-4e73-9364-8a0433712c0e\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.498118 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-combined-ca-bundle\") pod \"aa1d555a-b48e-4e73-9364-8a0433712c0e\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.498519 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qt6dm\" (UniqueName: \"kubernetes.io/projected/aa1d555a-b48e-4e73-9364-8a0433712c0e-kube-api-access-qt6dm\") pod \"aa1d555a-b48e-4e73-9364-8a0433712c0e\" (UID: \"aa1d555a-b48e-4e73-9364-8a0433712c0e\") " Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.498856 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwtpw\" (UniqueName: \"kubernetes.io/projected/77ceabba-3a0d-417c-ae23-37f6800fa1ff-kube-api-access-wwtpw\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.498933 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ceabba-3a0d-417c-ae23-37f6800fa1ff-logs\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.499205 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-config-data\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.499344 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-config-data\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.499415 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.499444 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c47wh\" (UniqueName: \"kubernetes.io/projected/8895109f-09f0-46b6-92af-ea6f0d61a836-kube-api-access-c47wh\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.499468 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8895109f-09f0-46b6-92af-ea6f0d61a836-logs\") pod \"nova-metadata-0\" (UID: 
\"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.499560 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.500645 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ceabba-3a0d-417c-ae23-37f6800fa1ff-logs\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.502789 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8895109f-09f0-46b6-92af-ea6f0d61a836-logs\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.503090 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-config-data\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.503285 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa1d555a-b48e-4e73-9364-8a0433712c0e-kube-api-access-qt6dm" (OuterVolumeSpecName: "kube-api-access-qt6dm") pod "aa1d555a-b48e-4e73-9364-8a0433712c0e" (UID: "aa1d555a-b48e-4e73-9364-8a0433712c0e"). InnerVolumeSpecName "kube-api-access-qt6dm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.503919 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.504901 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.505950 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-config-data\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.518530 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c47wh\" (UniqueName: \"kubernetes.io/projected/8895109f-09f0-46b6-92af-ea6f0d61a836-kube-api-access-c47wh\") pod \"nova-metadata-0\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.519777 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwtpw\" (UniqueName: \"kubernetes.io/projected/77ceabba-3a0d-417c-ae23-37f6800fa1ff-kube-api-access-wwtpw\") pod \"nova-api-0\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.524852 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aa1d555a-b48e-4e73-9364-8a0433712c0e" (UID: "aa1d555a-b48e-4e73-9364-8a0433712c0e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.551917 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-config-data" (OuterVolumeSpecName: "config-data") pod "aa1d555a-b48e-4e73-9364-8a0433712c0e" (UID: "aa1d555a-b48e-4e73-9364-8a0433712c0e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.601585 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qt6dm\" (UniqueName: \"kubernetes.io/projected/aa1d555a-b48e-4e73-9364-8a0433712c0e-kube-api-access-qt6dm\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.601616 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.601627 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa1d555a-b48e-4e73-9364-8a0433712c0e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.657988 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.752753 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.772757 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.925299 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ad7344d8-55c3-4025-8cba-b5919e8d42a8","Type":"ContainerStarted","Data":"be3d927028bb1fed4d09852a1071038377a75dee5bfeed0d789e83c84a29e48a"} Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.927358 4940 generic.go:334] "Generic (PLEG): container finished" podID="aa1d555a-b48e-4e73-9364-8a0433712c0e" containerID="0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae" exitCode=0 Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.927385 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"aa1d555a-b48e-4e73-9364-8a0433712c0e","Type":"ContainerDied","Data":"0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae"} Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.927404 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"aa1d555a-b48e-4e73-9364-8a0433712c0e","Type":"ContainerDied","Data":"391b51f6ffa31c02ec7ecb5dea026ba66cfd387b9f946be02e299972ea5357e8"} Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.927420 4940 scope.go:117] "RemoveContainer" containerID="0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.927529 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.965705 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.980542 4940 scope.go:117] "RemoveContainer" containerID="0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.980857 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: E1126 08:50:48.982607 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae\": container with ID starting with 0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae not found: ID does not exist" containerID="0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.982640 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae"} err="failed to get container status \"0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae\": rpc error: code = NotFound desc = could not find container \"0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae\": container with ID starting with 0c6c19808e76dc7da1f8484f148d4ed20f450cd0ef591c020946beceaeb878ae not found: ID does not exist" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.994632 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:48 crc kubenswrapper[4940]: E1126 08:50:48.995172 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa1d555a-b48e-4e73-9364-8a0433712c0e" containerName="nova-scheduler-scheduler" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.995194 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa1d555a-b48e-4e73-9364-8a0433712c0e" containerName="nova-scheduler-scheduler" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.995437 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa1d555a-b48e-4e73-9364-8a0433712c0e" containerName="nova-scheduler-scheduler" Nov 26 08:50:48 crc kubenswrapper[4940]: I1126 08:50:48.996111 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:48.998819 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.017076 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.067785 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:50:49 crc kubenswrapper[4940]: W1126 08:50:49.072429 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ceabba_3a0d_417c_ae23_37f6800fa1ff.slice/crio-5f62d109d53288105f73c101e2916bce592a252e74e5633e12fc48114bcd47cc WatchSource:0}: Error finding container 5f62d109d53288105f73c101e2916bce592a252e74e5633e12fc48114bcd47cc: Status 404 returned error can't find the container with id 5f62d109d53288105f73c101e2916bce592a252e74e5633e12fc48114bcd47cc Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.115366 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-config-data\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.115497 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.115576 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lv2f\" (UniqueName: \"kubernetes.io/projected/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-kube-api-access-8lv2f\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.181209 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cd1d1a1-ed28-4409-b1d3-80a899f96e7c" path="/var/lib/kubelet/pods/2cd1d1a1-ed28-4409-b1d3-80a899f96e7c/volumes" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.182495 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47400aad-df54-4c42-93ea-2af6b06168c9" path="/var/lib/kubelet/pods/47400aad-df54-4c42-93ea-2af6b06168c9/volumes" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.183552 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa1d555a-b48e-4e73-9364-8a0433712c0e" path="/var/lib/kubelet/pods/aa1d555a-b48e-4e73-9364-8a0433712c0e/volumes" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.201652 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:50:49 crc kubenswrapper[4940]: W1126 08:50:49.208946 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8895109f_09f0_46b6_92af_ea6f0d61a836.slice/crio-1e06b7cd17e14b822f011666baca043d951b98ebcaf48931fb02c9a3c18db033 WatchSource:0}: Error finding container 1e06b7cd17e14b822f011666baca043d951b98ebcaf48931fb02c9a3c18db033: Status 
404 returned error can't find the container with id 1e06b7cd17e14b822f011666baca043d951b98ebcaf48931fb02c9a3c18db033 Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.217304 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-config-data\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.217423 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.217467 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lv2f\" (UniqueName: \"kubernetes.io/projected/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-kube-api-access-8lv2f\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.222399 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.224187 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-config-data\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.235427 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lv2f\" (UniqueName: \"kubernetes.io/projected/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-kube-api-access-8lv2f\") pod \"nova-scheduler-0\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.319370 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.495492 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.513563 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.571348 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.634876 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c7d584995-74jn2"] Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.635160 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-c7d584995-74jn2" podUID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" containerName="dnsmasq-dns" containerID="cri-o://f21e559e0e603497ccd920b25b5ed2429c235847f3783eaa60fcea7c8367b3cf" gracePeriod=10 Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.769637 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:50:49 crc kubenswrapper[4940]: W1126 08:50:49.796336 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1bc67c1_09f2_40b3_ba5c_61ed69a18368.slice/crio-82a7dff975fd3f4ebfa142b5e3eb1015069014709d99d354c11da1bf74f8899b WatchSource:0}: Error finding container 82a7dff975fd3f4ebfa142b5e3eb1015069014709d99d354c11da1bf74f8899b: Status 404 returned error can't find the container with id 82a7dff975fd3f4ebfa142b5e3eb1015069014709d99d354c11da1bf74f8899b Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.946052 4940 generic.go:334] "Generic (PLEG): container finished" podID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" containerID="f21e559e0e603497ccd920b25b5ed2429c235847f3783eaa60fcea7c8367b3cf" exitCode=0 Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.946118 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c7d584995-74jn2" event={"ID":"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9","Type":"ContainerDied","Data":"f21e559e0e603497ccd920b25b5ed2429c235847f3783eaa60fcea7c8367b3cf"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.948535 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8895109f-09f0-46b6-92af-ea6f0d61a836","Type":"ContainerStarted","Data":"a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.948559 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8895109f-09f0-46b6-92af-ea6f0d61a836","Type":"ContainerStarted","Data":"f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.948569 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8895109f-09f0-46b6-92af-ea6f0d61a836","Type":"ContainerStarted","Data":"1e06b7cd17e14b822f011666baca043d951b98ebcaf48931fb02c9a3c18db033"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.951282 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"77ceabba-3a0d-417c-ae23-37f6800fa1ff","Type":"ContainerStarted","Data":"df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.951305 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ceabba-3a0d-417c-ae23-37f6800fa1ff","Type":"ContainerStarted","Data":"47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.951316 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ceabba-3a0d-417c-ae23-37f6800fa1ff","Type":"ContainerStarted","Data":"5f62d109d53288105f73c101e2916bce592a252e74e5633e12fc48114bcd47cc"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.955247 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c1bc67c1-09f2-40b3-ba5c-61ed69a18368","Type":"ContainerStarted","Data":"82a7dff975fd3f4ebfa142b5e3eb1015069014709d99d354c11da1bf74f8899b"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.957189 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ad7344d8-55c3-4025-8cba-b5919e8d42a8","Type":"ContainerStarted","Data":"9ac04ec0ad7a154edb242afdfe0ebc6daba5068e91dae9281ac6765a664f107c"} Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.957272 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.980716 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.980694491 podStartE2EDuration="1.980694491s" podCreationTimestamp="2025-11-26 08:50:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:49.964577048 +0000 UTC m=+6951.484718667" watchObservedRunningTime="2025-11-26 08:50:49.980694491 +0000 UTC m=+6951.500836110" Nov 26 08:50:49 crc kubenswrapper[4940]: I1126 08:50:49.981286 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:49.999727 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.9996869950000002 podStartE2EDuration="2.999686995s" podCreationTimestamp="2025-11-26 08:50:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:49.982194938 +0000 UTC m=+6951.502336557" watchObservedRunningTime="2025-11-26 08:50:49.999686995 +0000 UTC m=+6951.519828624" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.026917 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.026897449 podStartE2EDuration="2.026897449s" podCreationTimestamp="2025-11-26 08:50:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:50.000877112 +0000 UTC m=+6951.521018741" watchObservedRunningTime="2025-11-26 08:50:50.026897449 +0000 UTC m=+6951.547039068" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.069521 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.140408 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-sb\") pod \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.140793 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58rtk\" (UniqueName: \"kubernetes.io/projected/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-kube-api-access-58rtk\") pod \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.140895 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-dns-svc\") pod \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.140996 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-nb\") pod \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.141104 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-config\") pod \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\" (UID: \"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9\") " Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.145538 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-kube-api-access-58rtk" (OuterVolumeSpecName: "kube-api-access-58rtk") pod "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" (UID: "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9"). InnerVolumeSpecName "kube-api-access-58rtk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.190944 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" (UID: "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.204354 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-config" (OuterVolumeSpecName: "config") pod "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" (UID: "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.207128 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" (UID: "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.222089 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" (UID: "92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.244196 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.244238 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.244255 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58rtk\" (UniqueName: \"kubernetes.io/projected/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-kube-api-access-58rtk\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.244274 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.244285 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.969808 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c7d584995-74jn2" event={"ID":"92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9","Type":"ContainerDied","Data":"5d877b1b4f0c44b0e7594057178f2a6659bd502ef12c07421adc09fae247476e"} Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.969875 4940 scope.go:117] "RemoveContainer" containerID="f21e559e0e603497ccd920b25b5ed2429c235847f3783eaa60fcea7c8367b3cf" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.970055 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c7d584995-74jn2" Nov 26 08:50:50 crc kubenswrapper[4940]: I1126 08:50:50.979774 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c1bc67c1-09f2-40b3-ba5c-61ed69a18368","Type":"ContainerStarted","Data":"9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d"} Nov 26 08:50:51 crc kubenswrapper[4940]: I1126 08:50:51.004456 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.004436819 podStartE2EDuration="3.004436819s" podCreationTimestamp="2025-11-26 08:50:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:50.99659016 +0000 UTC m=+6952.516731789" watchObservedRunningTime="2025-11-26 08:50:51.004436819 +0000 UTC m=+6952.524578438" Nov 26 08:50:51 crc kubenswrapper[4940]: I1126 08:50:51.019268 4940 scope.go:117] "RemoveContainer" containerID="d15c4411a0b64f84f5725ec5905c2cc8e3c40b0e77a602a73809f326899b694e" Nov 26 08:50:51 crc kubenswrapper[4940]: I1126 08:50:51.024693 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c7d584995-74jn2"] Nov 26 08:50:51 crc kubenswrapper[4940]: I1126 08:50:51.034477 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c7d584995-74jn2"] Nov 26 08:50:51 crc kubenswrapper[4940]: I1126 08:50:51.180980 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" path="/var/lib/kubelet/pods/92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9/volumes" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.370215 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.754021 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.754137 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.832295 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-ztlzq"] Nov 26 08:50:53 crc kubenswrapper[4940]: E1126 08:50:53.832718 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" containerName="dnsmasq-dns" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.832737 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" containerName="dnsmasq-dns" Nov 26 08:50:53 crc kubenswrapper[4940]: E1126 08:50:53.832772 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" containerName="init" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.832783 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" containerName="init" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.833093 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="92f6a461-9fe4-4a6d-88ef-bd8bdb0906a9" containerName="dnsmasq-dns" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.833839 4940 util.go:30] "No sandbox for pod can be found. 
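[editor's note] The pod_startup_latency_tracker entry above is plain timestamp arithmetic: podStartSLOduration is watchObservedRunningTime minus podCreationTimestamp (the pull timestamps are zero because no image pull was needed). A runnable check with the values copied from the nova-scheduler-0 entry:

package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Parse accepts fractional seconds in the input even though the
	// layout's seconds field carries none.
	const layout = "2006-01-02 15:04:05 -0700 MST"
	created, _ := time.Parse(layout, "2025-11-26 08:50:48 +0000 UTC")
	observed, _ := time.Parse(layout, "2025-11-26 08:50:51.004436819 +0000 UTC")
	fmt.Println(observed.Sub(created)) // 3.004436819s == podStartSLOduration
}
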
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.837180 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.837373 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 26 08:50:53 crc kubenswrapper[4940]: I1126 08:50:53.844389 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-ztlzq"] Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.006179 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-scripts\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.006240 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.006458 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkw5j\" (UniqueName: \"kubernetes.io/projected/fbfeccf1-3acd-4316-b412-e2a4481b54d4-kube-api-access-dkw5j\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.006539 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-config-data\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.107693 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-scripts\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.107963 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.108186 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkw5j\" (UniqueName: \"kubernetes.io/projected/fbfeccf1-3acd-4316-b412-e2a4481b54d4-kube-api-access-dkw5j\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.108245 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-config-data\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.113408 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.120162 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-scripts\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.120548 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-config-data\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.131527 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkw5j\" (UniqueName: \"kubernetes.io/projected/fbfeccf1-3acd-4316-b412-e2a4481b54d4-kube-api-access-dkw5j\") pod \"nova-cell1-cell-mapping-ztlzq\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.155110 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.166343 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.320294 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 08:50:54 crc kubenswrapper[4940]: I1126 08:50:54.656262 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-ztlzq"] Nov 26 08:50:54 crc kubenswrapper[4940]: W1126 08:50:54.666771 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfbfeccf1_3acd_4316_b412_e2a4481b54d4.slice/crio-3f1e70b71f68096498b36c07b283613fbaf92239658bcd7bd398040fa0527173 WatchSource:0}: Error finding container 3f1e70b71f68096498b36c07b283613fbaf92239658bcd7bd398040fa0527173: Status 404 returned error can't find the container with id 3f1e70b71f68096498b36c07b283613fbaf92239658bcd7bd398040fa0527173 Nov 26 08:50:55 crc kubenswrapper[4940]: I1126 08:50:55.028980 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-ztlzq" event={"ID":"fbfeccf1-3acd-4316-b412-e2a4481b54d4","Type":"ContainerStarted","Data":"190fae3d34f158d1f87b93eb4b3be09a481d00e353b120788862ef8799aace96"} Nov 26 08:50:55 crc kubenswrapper[4940]: I1126 08:50:55.029353 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-ztlzq" event={"ID":"fbfeccf1-3acd-4316-b412-e2a4481b54d4","Type":"ContainerStarted","Data":"3f1e70b71f68096498b36c07b283613fbaf92239658bcd7bd398040fa0527173"} Nov 26 08:50:55 crc kubenswrapper[4940]: I1126 08:50:55.032889 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"4097e33d00555a418131c81b7ef769c20a467eee44ca0e7c613ac70f0162336d"} Nov 26 08:50:55 crc kubenswrapper[4940]: I1126 08:50:55.055791 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-ztlzq" podStartSLOduration=2.055764417 podStartE2EDuration="2.055764417s" podCreationTimestamp="2025-11-26 08:50:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:50:55.044437327 +0000 UTC m=+6956.564578966" watchObservedRunningTime="2025-11-26 08:50:55.055764417 +0000 UTC m=+6956.575906056" Nov 26 08:50:58 crc kubenswrapper[4940]: I1126 08:50:58.658702 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 08:50:58 crc kubenswrapper[4940]: I1126 08:50:58.659365 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 08:50:58 crc kubenswrapper[4940]: I1126 08:50:58.753249 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 08:50:58 crc kubenswrapper[4940]: I1126 08:50:58.753818 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 08:50:59 crc kubenswrapper[4940]: I1126 08:50:59.319595 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 08:50:59 crc kubenswrapper[4940]: I1126 
08:50:59.351480 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 08:50:59 crc kubenswrapper[4940]: I1126 08:50:59.741224 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.109:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 08:50:59 crc kubenswrapper[4940]: I1126 08:50:59.741259 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.109:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 08:50:59 crc kubenswrapper[4940]: I1126 08:50:59.835358 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.110:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 08:50:59 crc kubenswrapper[4940]: I1126 08:50:59.835931 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.110:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 08:51:00 crc kubenswrapper[4940]: I1126 08:51:00.083412 4940 generic.go:334] "Generic (PLEG): container finished" podID="fbfeccf1-3acd-4316-b412-e2a4481b54d4" containerID="190fae3d34f158d1f87b93eb4b3be09a481d00e353b120788862ef8799aace96" exitCode=0 Nov 26 08:51:00 crc kubenswrapper[4940]: I1126 08:51:00.083501 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-ztlzq" event={"ID":"fbfeccf1-3acd-4316-b412-e2a4481b54d4","Type":"ContainerDied","Data":"190fae3d34f158d1f87b93eb4b3be09a481d00e353b120788862ef8799aace96"} Nov 26 08:51:00 crc kubenswrapper[4940]: I1126 08:51:00.122846 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.443698 4940 util.go:48] "No ready sandbox for pod can be found. 
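[editor's note] The "Probe failed" records above are ordinary HTTP GETs whose response headers did not arrive within the probe timeout (1s is the kubelet default). A minimal Go reproduction; the URL is copied from the log, and on timeout Go's net/http client produces exactly the error text seen in probeResult:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce imitates an HTTP startup probe: success is any response whose
// headers arrive within the timeout and whose status code is below 400.
func probeOnce(url string, timeout time.Duration) error {
	client := http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		// on timeout this reads: context deadline exceeded
		// (Client.Timeout exceeded while awaiting headers)
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy: HTTP %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeOnce("http://10.217.1.109:8774/", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}
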
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.575032 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkw5j\" (UniqueName: \"kubernetes.io/projected/fbfeccf1-3acd-4316-b412-e2a4481b54d4-kube-api-access-dkw5j\") pod \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.575189 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-scripts\") pod \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.575224 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-combined-ca-bundle\") pod \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.575291 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-config-data\") pod \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\" (UID: \"fbfeccf1-3acd-4316-b412-e2a4481b54d4\") " Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.594186 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbfeccf1-3acd-4316-b412-e2a4481b54d4-kube-api-access-dkw5j" (OuterVolumeSpecName: "kube-api-access-dkw5j") pod "fbfeccf1-3acd-4316-b412-e2a4481b54d4" (UID: "fbfeccf1-3acd-4316-b412-e2a4481b54d4"). InnerVolumeSpecName "kube-api-access-dkw5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.594762 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-scripts" (OuterVolumeSpecName: "scripts") pod "fbfeccf1-3acd-4316-b412-e2a4481b54d4" (UID: "fbfeccf1-3acd-4316-b412-e2a4481b54d4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.629196 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fbfeccf1-3acd-4316-b412-e2a4481b54d4" (UID: "fbfeccf1-3acd-4316-b412-e2a4481b54d4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.668212 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-config-data" (OuterVolumeSpecName: "config-data") pod "fbfeccf1-3acd-4316-b412-e2a4481b54d4" (UID: "fbfeccf1-3acd-4316-b412-e2a4481b54d4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.678073 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkw5j\" (UniqueName: \"kubernetes.io/projected/fbfeccf1-3acd-4316-b412-e2a4481b54d4-kube-api-access-dkw5j\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.678112 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.678122 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:01 crc kubenswrapper[4940]: I1126 08:51:01.678131 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbfeccf1-3acd-4316-b412-e2a4481b54d4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.107955 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-ztlzq" event={"ID":"fbfeccf1-3acd-4316-b412-e2a4481b54d4","Type":"ContainerDied","Data":"3f1e70b71f68096498b36c07b283613fbaf92239658bcd7bd398040fa0527173"} Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.107999 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f1e70b71f68096498b36c07b283613fbaf92239658bcd7bd398040fa0527173" Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.108029 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-ztlzq" Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.342495 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.342818 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="c1bc67c1-09f2-40b3-ba5c-61ed69a18368" containerName="nova-scheduler-scheduler" containerID="cri-o://9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d" gracePeriod=30 Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.356803 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.357096 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-log" containerID="cri-o://47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9" gracePeriod=30 Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.358131 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-api" containerID="cri-o://df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1" gracePeriod=30 Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.374935 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.375949 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" 
containerName="nova-metadata-log" containerID="cri-o://f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d" gracePeriod=30 Nov 26 08:51:02 crc kubenswrapper[4940]: I1126 08:51:02.376091 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-metadata" containerID="cri-o://a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f" gracePeriod=30 Nov 26 08:51:03 crc kubenswrapper[4940]: I1126 08:51:03.117912 4940 generic.go:334] "Generic (PLEG): container finished" podID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerID="47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9" exitCode=143 Nov 26 08:51:03 crc kubenswrapper[4940]: I1126 08:51:03.118002 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ceabba-3a0d-417c-ae23-37f6800fa1ff","Type":"ContainerDied","Data":"47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9"} Nov 26 08:51:03 crc kubenswrapper[4940]: I1126 08:51:03.121433 4940 generic.go:334] "Generic (PLEG): container finished" podID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerID="f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d" exitCode=143 Nov 26 08:51:03 crc kubenswrapper[4940]: I1126 08:51:03.121469 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8895109f-09f0-46b6-92af-ea6f0d61a836","Type":"ContainerDied","Data":"f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d"} Nov 26 08:51:04 crc kubenswrapper[4940]: E1126 08:51:04.321691 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 08:51:04 crc kubenswrapper[4940]: E1126 08:51:04.322807 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 08:51:04 crc kubenswrapper[4940]: E1126 08:51:04.325113 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 08:51:04 crc kubenswrapper[4940]: E1126 08:51:04.325163 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="c1bc67c1-09f2-40b3-ba5c-61ed69a18368" containerName="nova-scheduler-scheduler" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.099873 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.116410 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.173329 4940 generic.go:334] "Generic (PLEG): container finished" podID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerID="a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f" exitCode=0 Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.173408 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8895109f-09f0-46b6-92af-ea6f0d61a836","Type":"ContainerDied","Data":"a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f"} Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.173439 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8895109f-09f0-46b6-92af-ea6f0d61a836","Type":"ContainerDied","Data":"1e06b7cd17e14b822f011666baca043d951b98ebcaf48931fb02c9a3c18db033"} Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.173460 4940 scope.go:117] "RemoveContainer" containerID="a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.173597 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.182920 4940 generic.go:334] "Generic (PLEG): container finished" podID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerID="df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1" exitCode=0 Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.182958 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ceabba-3a0d-417c-ae23-37f6800fa1ff","Type":"ContainerDied","Data":"df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1"} Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.182983 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ceabba-3a0d-417c-ae23-37f6800fa1ff","Type":"ContainerDied","Data":"5f62d109d53288105f73c101e2916bce592a252e74e5633e12fc48114bcd47cc"} Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.183051 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.205953 4940 scope.go:117] "RemoveContainer" containerID="f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.244495 4940 scope.go:117] "RemoveContainer" containerID="a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f" Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.244966 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f\": container with ID starting with a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f not found: ID does not exist" containerID="a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.245001 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f"} err="failed to get container status \"a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f\": rpc error: code = NotFound desc = could not find container \"a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f\": container with ID starting with a61d1a424f51f80e66d6e2ac5e0147dfef7d5d7d28de2b0f771950d65599c18f not found: ID does not exist" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.245022 4940 scope.go:117] "RemoveContainer" containerID="f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d" Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.246180 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d\": container with ID starting with f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d not found: ID does not exist" containerID="f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.246205 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d"} err="failed to get container status \"f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d\": rpc error: code = NotFound desc = could not find container \"f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d\": container with ID starting with f1895b518b44ff2879021704d32410e2771b0aaddb07edf8786b173a78a5c81d not found: ID does not exist" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.246218 4940 scope.go:117] "RemoveContainer" containerID="df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.264561 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-combined-ca-bundle\") pod \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.264623 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwtpw\" (UniqueName: \"kubernetes.io/projected/77ceabba-3a0d-417c-ae23-37f6800fa1ff-kube-api-access-wwtpw\") pod 
\"77ceabba-3a0d-417c-ae23-37f6800fa1ff\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.264665 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c47wh\" (UniqueName: \"kubernetes.io/projected/8895109f-09f0-46b6-92af-ea6f0d61a836-kube-api-access-c47wh\") pod \"8895109f-09f0-46b6-92af-ea6f0d61a836\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.264744 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ceabba-3a0d-417c-ae23-37f6800fa1ff-logs\") pod \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.264762 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8895109f-09f0-46b6-92af-ea6f0d61a836-logs\") pod \"8895109f-09f0-46b6-92af-ea6f0d61a836\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.264877 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-config-data\") pod \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\" (UID: \"77ceabba-3a0d-417c-ae23-37f6800fa1ff\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.264911 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-config-data\") pod \"8895109f-09f0-46b6-92af-ea6f0d61a836\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.264927 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-combined-ca-bundle\") pod \"8895109f-09f0-46b6-92af-ea6f0d61a836\" (UID: \"8895109f-09f0-46b6-92af-ea6f0d61a836\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.266863 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8895109f-09f0-46b6-92af-ea6f0d61a836-logs" (OuterVolumeSpecName: "logs") pod "8895109f-09f0-46b6-92af-ea6f0d61a836" (UID: "8895109f-09f0-46b6-92af-ea6f0d61a836"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.267302 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77ceabba-3a0d-417c-ae23-37f6800fa1ff-logs" (OuterVolumeSpecName: "logs") pod "77ceabba-3a0d-417c-ae23-37f6800fa1ff" (UID: "77ceabba-3a0d-417c-ae23-37f6800fa1ff"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.282194 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77ceabba-3a0d-417c-ae23-37f6800fa1ff-kube-api-access-wwtpw" (OuterVolumeSpecName: "kube-api-access-wwtpw") pod "77ceabba-3a0d-417c-ae23-37f6800fa1ff" (UID: "77ceabba-3a0d-417c-ae23-37f6800fa1ff"). InnerVolumeSpecName "kube-api-access-wwtpw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.292556 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-config-data" (OuterVolumeSpecName: "config-data") pod "77ceabba-3a0d-417c-ae23-37f6800fa1ff" (UID: "77ceabba-3a0d-417c-ae23-37f6800fa1ff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.292885 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8895109f-09f0-46b6-92af-ea6f0d61a836-kube-api-access-c47wh" (OuterVolumeSpecName: "kube-api-access-c47wh") pod "8895109f-09f0-46b6-92af-ea6f0d61a836" (UID: "8895109f-09f0-46b6-92af-ea6f0d61a836"). InnerVolumeSpecName "kube-api-access-c47wh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.295214 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8895109f-09f0-46b6-92af-ea6f0d61a836" (UID: "8895109f-09f0-46b6-92af-ea6f0d61a836"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.295370 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77ceabba-3a0d-417c-ae23-37f6800fa1ff" (UID: "77ceabba-3a0d-417c-ae23-37f6800fa1ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.298398 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-config-data" (OuterVolumeSpecName: "config-data") pod "8895109f-09f0-46b6-92af-ea6f0d61a836" (UID: "8895109f-09f0-46b6-92af-ea6f0d61a836"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.322279 4940 scope.go:117] "RemoveContainer" containerID="47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.366889 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.366923 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.366940 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8895109f-09f0-46b6-92af-ea6f0d61a836-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.366954 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ceabba-3a0d-417c-ae23-37f6800fa1ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.366967 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwtpw\" (UniqueName: \"kubernetes.io/projected/77ceabba-3a0d-417c-ae23-37f6800fa1ff-kube-api-access-wwtpw\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.366980 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c47wh\" (UniqueName: \"kubernetes.io/projected/8895109f-09f0-46b6-92af-ea6f0d61a836-kube-api-access-c47wh\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.366993 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ceabba-3a0d-417c-ae23-37f6800fa1ff-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.367004 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8895109f-09f0-46b6-92af-ea6f0d61a836-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.392598 4940 scope.go:117] "RemoveContainer" containerID="df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1" Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.393437 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1\": container with ID starting with df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1 not found: ID does not exist" containerID="df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.393491 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1"} err="failed to get container status \"df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1\": rpc error: code = NotFound desc = could not find container \"df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1\": container with ID starting with 
df81316aa7a050d534bc9170d3b4e3bb543539cba79c3a16e0734938db274bc1 not found: ID does not exist" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.393512 4940 scope.go:117] "RemoveContainer" containerID="47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9" Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.394152 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9\": container with ID starting with 47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9 not found: ID does not exist" containerID="47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.394177 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9"} err="failed to get container status \"47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9\": rpc error: code = NotFound desc = could not find container \"47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9\": container with ID starting with 47757cbf927f65f6fad342b9a35026834c02805791c248bb9ec125487057ece9 not found: ID does not exist" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.543464 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.583179 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.616802 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.626948 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.654636 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.661704 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-log" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.661744 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-log" Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.661790 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-log" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.661797 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-log" Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.661813 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbfeccf1-3acd-4316-b412-e2a4481b54d4" containerName="nova-manage" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.661819 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbfeccf1-3acd-4316-b412-e2a4481b54d4" containerName="nova-manage" Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.661847 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-api" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.661853 4940 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-api" Nov 26 08:51:06 crc kubenswrapper[4940]: E1126 08:51:06.661894 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-metadata" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.661900 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-metadata" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.663309 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbfeccf1-3acd-4316-b412-e2a4481b54d4" containerName="nova-manage" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.663336 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-metadata" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.663353 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" containerName="nova-metadata-log" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.663459 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-api" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.663491 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" containerName="nova-api-log" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.668057 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.676366 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.680903 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.683877 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.689501 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.701143 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.718229 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.722605 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.796257 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lv2f\" (UniqueName: \"kubernetes.io/projected/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-kube-api-access-8lv2f\") pod \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.796761 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-config-data\") pod \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.796909 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-combined-ca-bundle\") pod \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\" (UID: \"c1bc67c1-09f2-40b3-ba5c-61ed69a18368\") " Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.797435 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94022cb9-d378-4c8e-b06c-1cd527c59638-logs\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.797950 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.798107 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.798693 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0a1949-6143-4903-a56f-a513aad540e4-logs\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.798816 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-config-data\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.799292 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-config-data\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.799567 
4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f74mk\" (UniqueName: \"kubernetes.io/projected/94022cb9-d378-4c8e-b06c-1cd527c59638-kube-api-access-f74mk\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.800101 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnnt6\" (UniqueName: \"kubernetes.io/projected/ac0a1949-6143-4903-a56f-a513aad540e4-kube-api-access-tnnt6\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.813903 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-kube-api-access-8lv2f" (OuterVolumeSpecName: "kube-api-access-8lv2f") pod "c1bc67c1-09f2-40b3-ba5c-61ed69a18368" (UID: "c1bc67c1-09f2-40b3-ba5c-61ed69a18368"). InnerVolumeSpecName "kube-api-access-8lv2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.823113 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1bc67c1-09f2-40b3-ba5c-61ed69a18368" (UID: "c1bc67c1-09f2-40b3-ba5c-61ed69a18368"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.827226 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-config-data" (OuterVolumeSpecName: "config-data") pod "c1bc67c1-09f2-40b3-ba5c-61ed69a18368" (UID: "c1bc67c1-09f2-40b3-ba5c-61ed69a18368"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901646 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnnt6\" (UniqueName: \"kubernetes.io/projected/ac0a1949-6143-4903-a56f-a513aad540e4-kube-api-access-tnnt6\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901706 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94022cb9-d378-4c8e-b06c-1cd527c59638-logs\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901747 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901791 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901813 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0a1949-6143-4903-a56f-a513aad540e4-logs\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901827 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-config-data\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901848 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-config-data\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901897 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f74mk\" (UniqueName: \"kubernetes.io/projected/94022cb9-d378-4c8e-b06c-1cd527c59638-kube-api-access-f74mk\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901951 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901963 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lv2f\" (UniqueName: \"kubernetes.io/projected/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-kube-api-access-8lv2f\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.901973 4940 reconciler_common.go:293] "Volume 
detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1bc67c1-09f2-40b3-ba5c-61ed69a18368-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.902802 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94022cb9-d378-4c8e-b06c-1cd527c59638-logs\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.902880 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0a1949-6143-4903-a56f-a513aad540e4-logs\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.905433 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.910586 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.911334 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-config-data\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.912275 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-config-data\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.921706 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnnt6\" (UniqueName: \"kubernetes.io/projected/ac0a1949-6143-4903-a56f-a513aad540e4-kube-api-access-tnnt6\") pod \"nova-api-0\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " pod="openstack/nova-api-0" Nov 26 08:51:06 crc kubenswrapper[4940]: I1126 08:51:06.925094 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f74mk\" (UniqueName: \"kubernetes.io/projected/94022cb9-d378-4c8e-b06c-1cd527c59638-kube-api-access-f74mk\") pod \"nova-metadata-0\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " pod="openstack/nova-metadata-0" Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.038828 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.057848 4940 util.go:30] "No sandbox for pod can be found. 
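[editor's note] nova-api-0 and nova-metadata-0 reappear in the ADD events under new UIDs, which is why the kubelet reports "No sandbox for pod can be found" and starts fresh ones: pod identity is namespace/name plus UID, not name alone. A two-line illustration using the UIDs from the log:

package main

import "fmt"

// podKey captures pod identity: the re-created nova-api-0 shares a name with
// the deleted pod but not a UID, so the kubelet treats it as a new pod.
type podKey struct {
	Namespace, Name, UID string
}

func main() {
	old := podKey{"openstack", "nova-api-0", "77ceabba-3a0d-417c-ae23-37f6800fa1ff"}
	cur := podKey{"openstack", "nova-api-0", "ac0a1949-6143-4903-a56f-a513aad540e4"}
	fmt.Println("same name:", old.Name == cur.Name, "| same pod:", old == cur)
}
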
Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.185640 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77ceabba-3a0d-417c-ae23-37f6800fa1ff" path="/var/lib/kubelet/pods/77ceabba-3a0d-417c-ae23-37f6800fa1ff/volumes" Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.186823 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8895109f-09f0-46b6-92af-ea6f0d61a836" path="/var/lib/kubelet/pods/8895109f-09f0-46b6-92af-ea6f0d61a836/volumes" Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.254404 4940 generic.go:334] "Generic (PLEG): container finished" podID="c1bc67c1-09f2-40b3-ba5c-61ed69a18368" containerID="9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d" exitCode=0 Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.254461 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c1bc67c1-09f2-40b3-ba5c-61ed69a18368","Type":"ContainerDied","Data":"9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d"} Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.254498 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c1bc67c1-09f2-40b3-ba5c-61ed69a18368","Type":"ContainerDied","Data":"82a7dff975fd3f4ebfa142b5e3eb1015069014709d99d354c11da1bf74f8899b"} Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.254519 4940 scope.go:117] "RemoveContainer" containerID="9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d" Nov 26 08:51:07 crc kubenswrapper[4940]: I1126 08:51:07.254738 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.313797 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.327113 4940 scope.go:117] "RemoveContainer" containerID="9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d" Nov 26 08:51:08 crc kubenswrapper[4940]: E1126 08:51:07.338226 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d\": container with ID starting with 9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d not found: ID does not exist" containerID="9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.338283 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d"} err="failed to get container status \"9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d\": rpc error: code = NotFound desc = could not find container \"9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d\": container with ID starting with 9afb5175467ae883eabd6ca7d831d0fa34f6e10a928e7aeffcb449f2ff472a9d not found: ID does not exist" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.356111 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.400812 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:51:08 crc kubenswrapper[4940]: E1126 08:51:07.401292 4940 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="c1bc67c1-09f2-40b3-ba5c-61ed69a18368" containerName="nova-scheduler-scheduler" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.401307 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1bc67c1-09f2-40b3-ba5c-61ed69a18368" containerName="nova-scheduler-scheduler" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.401565 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1bc67c1-09f2-40b3-ba5c-61ed69a18368" containerName="nova-scheduler-scheduler" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.402312 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.405270 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.454328 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.522319 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t66g5\" (UniqueName: \"kubernetes.io/projected/849696c1-5809-4976-a4cc-b05f4432d07b-kube-api-access-t66g5\") pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.522365 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-config-data\") pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.522403 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.625534 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t66g5\" (UniqueName: \"kubernetes.io/projected/849696c1-5809-4976-a4cc-b05f4432d07b-kube-api-access-t66g5\") pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.626127 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-config-data\") pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.626203 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.632366 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-combined-ca-bundle\") 
pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.632401 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-config-data\") pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.657752 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t66g5\" (UniqueName: \"kubernetes.io/projected/849696c1-5809-4976-a4cc-b05f4432d07b-kube-api-access-t66g5\") pod \"nova-scheduler-0\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") " pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:07.765798 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:08.370973 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:51:08 crc kubenswrapper[4940]: W1126 08:51:08.374320 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac0a1949_6143_4903_a56f_a513aad540e4.slice/crio-31076bbefd5c71a3ae5512547a5be368d6fe568b172370f71347ecf9491ad91c WatchSource:0}: Error finding container 31076bbefd5c71a3ae5512547a5be368d6fe568b172370f71347ecf9491ad91c: Status 404 returned error can't find the container with id 31076bbefd5c71a3ae5512547a5be368d6fe568b172370f71347ecf9491ad91c Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:08.385591 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:51:08 crc kubenswrapper[4940]: I1126 08:51:08.467108 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:51:08 crc kubenswrapper[4940]: W1126 08:51:08.474708 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod849696c1_5809_4976_a4cc_b05f4432d07b.slice/crio-eba11557834f168f812046e7dc8c76e1865dd110fccb5120406bd55251852f26 WatchSource:0}: Error finding container eba11557834f168f812046e7dc8c76e1865dd110fccb5120406bd55251852f26: Status 404 returned error can't find the container with id eba11557834f168f812046e7dc8c76e1865dd110fccb5120406bd55251852f26 Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.219146 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1bc67c1-09f2-40b3-ba5c-61ed69a18368" path="/var/lib/kubelet/pods/c1bc67c1-09f2-40b3-ba5c-61ed69a18368/volumes" Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.283034 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"849696c1-5809-4976-a4cc-b05f4432d07b","Type":"ContainerStarted","Data":"2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c"} Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.283091 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"849696c1-5809-4976-a4cc-b05f4432d07b","Type":"ContainerStarted","Data":"eba11557834f168f812046e7dc8c76e1865dd110fccb5120406bd55251852f26"} Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.285493 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"94022cb9-d378-4c8e-b06c-1cd527c59638","Type":"ContainerStarted","Data":"7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3"} Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.285744 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"94022cb9-d378-4c8e-b06c-1cd527c59638","Type":"ContainerStarted","Data":"1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07"} Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.285756 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"94022cb9-d378-4c8e-b06c-1cd527c59638","Type":"ContainerStarted","Data":"868d1d57acb5bd3a5712667a1bc9f1e7532e7fe4c905024561fb732814a232ce"} Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.287197 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac0a1949-6143-4903-a56f-a513aad540e4","Type":"ContainerStarted","Data":"d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78"} Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.287233 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac0a1949-6143-4903-a56f-a513aad540e4","Type":"ContainerStarted","Data":"23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73"} Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.287243 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac0a1949-6143-4903-a56f-a513aad540e4","Type":"ContainerStarted","Data":"31076bbefd5c71a3ae5512547a5be368d6fe568b172370f71347ecf9491ad91c"} Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.305883 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.305864972 podStartE2EDuration="2.305864972s" podCreationTimestamp="2025-11-26 08:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:51:09.297868078 +0000 UTC m=+6970.818009717" watchObservedRunningTime="2025-11-26 08:51:09.305864972 +0000 UTC m=+6970.826006591" Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.325657 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.325632401 podStartE2EDuration="3.325632401s" podCreationTimestamp="2025-11-26 08:51:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:51:09.317827772 +0000 UTC m=+6970.837969401" watchObservedRunningTime="2025-11-26 08:51:09.325632401 +0000 UTC m=+6970.845774020" Nov 26 08:51:09 crc kubenswrapper[4940]: I1126 08:51:09.343699 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.343679484 podStartE2EDuration="3.343679484s" podCreationTimestamp="2025-11-26 08:51:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:51:09.333299964 +0000 UTC m=+6970.853441583" watchObservedRunningTime="2025-11-26 08:51:09.343679484 +0000 UTC m=+6970.863821103" Nov 26 08:51:11 crc kubenswrapper[4940]: I1126 08:51:11.893871 4940 scope.go:117] "RemoveContainer" containerID="2c42738d0842e326477ab3aa88798d8dca2300e3a76104cfad051d8a0bb6421d" Nov 26 08:51:12 crc kubenswrapper[4940]: I1126 
08:51:12.039983 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 08:51:12 crc kubenswrapper[4940]: I1126 08:51:12.040029 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 08:51:12 crc kubenswrapper[4940]: I1126 08:51:12.766464 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 08:51:17 crc kubenswrapper[4940]: I1126 08:51:17.039368 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 08:51:17 crc kubenswrapper[4940]: I1126 08:51:17.040177 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 08:51:17 crc kubenswrapper[4940]: I1126 08:51:17.059116 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 08:51:17 crc kubenswrapper[4940]: I1126 08:51:17.059197 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 08:51:17 crc kubenswrapper[4940]: I1126 08:51:17.766723 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 08:51:17 crc kubenswrapper[4940]: I1126 08:51:17.825415 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 08:51:18 crc kubenswrapper[4940]: I1126 08:51:18.165444 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.114:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 08:51:18 crc kubenswrapper[4940]: I1126 08:51:18.165745 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.113:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 08:51:18 crc kubenswrapper[4940]: I1126 08:51:18.165786 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.113:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 08:51:18 crc kubenswrapper[4940]: I1126 08:51:18.165838 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.114:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 08:51:18 crc kubenswrapper[4940]: I1126 08:51:18.401704 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.043403 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.044095 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.046012 4940 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.050192 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.065251 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.065728 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.067321 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.082495 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.467078 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.471933 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.652349 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66cd6ddd57-zxnqx"] Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.653989 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.669136 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66cd6ddd57-zxnqx"] Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.814814 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-sb\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.815136 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tvmm\" (UniqueName: \"kubernetes.io/projected/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-kube-api-access-6tvmm\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.815233 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-dns-svc\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.815265 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-nb\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.815311 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-config\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.917402 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tvmm\" (UniqueName: \"kubernetes.io/projected/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-kube-api-access-6tvmm\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.917824 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-dns-svc\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.917851 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-nb\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.917878 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-config\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.917901 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-sb\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.918857 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-nb\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.919291 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-dns-svc\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.919324 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-config\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.919421 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-sb\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: 
\"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.936089 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tvmm\" (UniqueName: \"kubernetes.io/projected/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-kube-api-access-6tvmm\") pod \"dnsmasq-dns-66cd6ddd57-zxnqx\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:27 crc kubenswrapper[4940]: I1126 08:51:27.980822 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:28 crc kubenswrapper[4940]: I1126 08:51:28.451912 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66cd6ddd57-zxnqx"] Nov 26 08:51:28 crc kubenswrapper[4940]: W1126 08:51:28.453689 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5bbbe50b_2ffe_44b6_b1b8_07ca6634eacc.slice/crio-1c87691ac8081cf3af4b890456e385220c51b37ccecbf4825e03801bd5c2b192 WatchSource:0}: Error finding container 1c87691ac8081cf3af4b890456e385220c51b37ccecbf4825e03801bd5c2b192: Status 404 returned error can't find the container with id 1c87691ac8081cf3af4b890456e385220c51b37ccecbf4825e03801bd5c2b192 Nov 26 08:51:28 crc kubenswrapper[4940]: I1126 08:51:28.476198 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" event={"ID":"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc","Type":"ContainerStarted","Data":"1c87691ac8081cf3af4b890456e385220c51b37ccecbf4825e03801bd5c2b192"} Nov 26 08:51:29 crc kubenswrapper[4940]: I1126 08:51:29.491877 4940 generic.go:334] "Generic (PLEG): container finished" podID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" containerID="681127c380e6dd6dfd7a565884ae9d032f2c248b36d384127c76696ab5379b11" exitCode=0 Nov 26 08:51:29 crc kubenswrapper[4940]: I1126 08:51:29.491943 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" event={"ID":"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc","Type":"ContainerDied","Data":"681127c380e6dd6dfd7a565884ae9d032f2c248b36d384127c76696ab5379b11"} Nov 26 08:51:30 crc kubenswrapper[4940]: I1126 08:51:30.502891 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" event={"ID":"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc","Type":"ContainerStarted","Data":"8ec56c4a91170dd3daf40da315ed56e1a0d744f2d2109451e6dec2dfbe053f3b"} Nov 26 08:51:30 crc kubenswrapper[4940]: I1126 08:51:30.503380 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:30 crc kubenswrapper[4940]: I1126 08:51:30.532077 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" podStartSLOduration=3.532057664 podStartE2EDuration="3.532057664s" podCreationTimestamp="2025-11-26 08:51:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:51:30.524032609 +0000 UTC m=+6992.044174228" watchObservedRunningTime="2025-11-26 08:51:30.532057664 +0000 UTC m=+6992.052199283" Nov 26 08:51:37 crc kubenswrapper[4940]: I1126 08:51:37.982219 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 
08:51:38.072075 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f4579ddb7-7k4n2"] Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.072748 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" podUID="83232608-2b13-4451-a72b-5638f3d2d55a" containerName="dnsmasq-dns" containerID="cri-o://e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b" gracePeriod=10 Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.551279 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.601688 4940 generic.go:334] "Generic (PLEG): container finished" podID="83232608-2b13-4451-a72b-5638f3d2d55a" containerID="e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b" exitCode=0 Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.601749 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" event={"ID":"83232608-2b13-4451-a72b-5638f3d2d55a","Type":"ContainerDied","Data":"e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b"} Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.601785 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" event={"ID":"83232608-2b13-4451-a72b-5638f3d2d55a","Type":"ContainerDied","Data":"afeee777c04e53ad29f26b21b5c70103fa14a296b4d62ce52f347cbcaca718dc"} Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.601828 4940 scope.go:117] "RemoveContainer" containerID="e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.602094 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f4579ddb7-7k4n2" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.633392 4940 scope.go:117] "RemoveContainer" containerID="30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.659294 4940 scope.go:117] "RemoveContainer" containerID="e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b" Nov 26 08:51:38 crc kubenswrapper[4940]: E1126 08:51:38.659730 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b\": container with ID starting with e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b not found: ID does not exist" containerID="e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.659773 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b"} err="failed to get container status \"e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b\": rpc error: code = NotFound desc = could not find container \"e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b\": container with ID starting with e5969b9eeab1d43e49affe3be850fc9adf7932efd7c26160e17453dbcf17d75b not found: ID does not exist" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.659800 4940 scope.go:117] "RemoveContainer" containerID="30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75" Nov 26 08:51:38 crc kubenswrapper[4940]: E1126 08:51:38.660435 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75\": container with ID starting with 30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75 not found: ID does not exist" containerID="30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.660463 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75"} err="failed to get container status \"30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75\": rpc error: code = NotFound desc = could not find container \"30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75\": container with ID starting with 30b6f2e69144543d01ff3ad3abd83624125a3ca2920f9ab02d51542829507f75 not found: ID does not exist" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.679137 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbll4\" (UniqueName: \"kubernetes.io/projected/83232608-2b13-4451-a72b-5638f3d2d55a-kube-api-access-rbll4\") pod \"83232608-2b13-4451-a72b-5638f3d2d55a\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.679191 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-sb\") pod \"83232608-2b13-4451-a72b-5638f3d2d55a\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.679399 4940 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-config\") pod \"83232608-2b13-4451-a72b-5638f3d2d55a\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.679441 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-dns-svc\") pod \"83232608-2b13-4451-a72b-5638f3d2d55a\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.679489 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-nb\") pod \"83232608-2b13-4451-a72b-5638f3d2d55a\" (UID: \"83232608-2b13-4451-a72b-5638f3d2d55a\") " Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.687937 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83232608-2b13-4451-a72b-5638f3d2d55a-kube-api-access-rbll4" (OuterVolumeSpecName: "kube-api-access-rbll4") pod "83232608-2b13-4451-a72b-5638f3d2d55a" (UID: "83232608-2b13-4451-a72b-5638f3d2d55a"). InnerVolumeSpecName "kube-api-access-rbll4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.727915 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "83232608-2b13-4451-a72b-5638f3d2d55a" (UID: "83232608-2b13-4451-a72b-5638f3d2d55a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.729755 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "83232608-2b13-4451-a72b-5638f3d2d55a" (UID: "83232608-2b13-4451-a72b-5638f3d2d55a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.732272 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "83232608-2b13-4451-a72b-5638f3d2d55a" (UID: "83232608-2b13-4451-a72b-5638f3d2d55a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.743977 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-config" (OuterVolumeSpecName: "config") pod "83232608-2b13-4451-a72b-5638f3d2d55a" (UID: "83232608-2b13-4451-a72b-5638f3d2d55a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.781423 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.781460 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.781475 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbll4\" (UniqueName: \"kubernetes.io/projected/83232608-2b13-4451-a72b-5638f3d2d55a-kube-api-access-rbll4\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.781484 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.781494 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83232608-2b13-4451-a72b-5638f3d2d55a-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.947963 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f4579ddb7-7k4n2"] Nov 26 08:51:38 crc kubenswrapper[4940]: I1126 08:51:38.960952 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f4579ddb7-7k4n2"] Nov 26 08:51:39 crc kubenswrapper[4940]: I1126 08:51:39.177826 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83232608-2b13-4451-a72b-5638f3d2d55a" path="/var/lib/kubelet/pods/83232608-2b13-4451-a72b-5638f3d2d55a/volumes" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.328108 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-xql5f"] Nov 26 08:51:40 crc kubenswrapper[4940]: E1126 08:51:40.329365 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83232608-2b13-4451-a72b-5638f3d2d55a" containerName="init" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.329454 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="83232608-2b13-4451-a72b-5638f3d2d55a" containerName="init" Nov 26 08:51:40 crc kubenswrapper[4940]: E1126 08:51:40.329514 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83232608-2b13-4451-a72b-5638f3d2d55a" containerName="dnsmasq-dns" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.329565 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="83232608-2b13-4451-a72b-5638f3d2d55a" containerName="dnsmasq-dns" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.329815 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="83232608-2b13-4451-a72b-5638f3d2d55a" containerName="dnsmasq-dns" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.330477 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.349033 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-xql5f"] Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.414455 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n84nt\" (UniqueName: \"kubernetes.io/projected/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-kube-api-access-n84nt\") pod \"cinder-db-create-xql5f\" (UID: \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\") " pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.414580 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-operator-scripts\") pod \"cinder-db-create-xql5f\" (UID: \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\") " pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.436035 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-19da-account-create-update-g2knf"] Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.437915 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.439976 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.451733 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-19da-account-create-update-g2knf"] Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.516527 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-operator-scripts\") pod \"cinder-19da-account-create-update-g2knf\" (UID: \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\") " pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.516747 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqlmr\" (UniqueName: \"kubernetes.io/projected/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-kube-api-access-bqlmr\") pod \"cinder-19da-account-create-update-g2knf\" (UID: \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\") " pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.516907 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n84nt\" (UniqueName: \"kubernetes.io/projected/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-kube-api-access-n84nt\") pod \"cinder-db-create-xql5f\" (UID: \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\") " pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.517139 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-operator-scripts\") pod \"cinder-db-create-xql5f\" (UID: \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\") " pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.517936 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-operator-scripts\") pod \"cinder-db-create-xql5f\" (UID: \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\") " pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.540400 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n84nt\" (UniqueName: \"kubernetes.io/projected/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-kube-api-access-n84nt\") pod \"cinder-db-create-xql5f\" (UID: \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\") " pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.620176 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-operator-scripts\") pod \"cinder-19da-account-create-update-g2knf\" (UID: \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\") " pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.620222 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqlmr\" (UniqueName: \"kubernetes.io/projected/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-kube-api-access-bqlmr\") pod \"cinder-19da-account-create-update-g2knf\" (UID: \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\") " pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.620965 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-operator-scripts\") pod \"cinder-19da-account-create-update-g2knf\" (UID: \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\") " pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.635878 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqlmr\" (UniqueName: \"kubernetes.io/projected/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-kube-api-access-bqlmr\") pod \"cinder-19da-account-create-update-g2knf\" (UID: \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\") " pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.649694 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:40 crc kubenswrapper[4940]: I1126 08:51:40.754431 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:41 crc kubenswrapper[4940]: I1126 08:51:41.077973 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-xql5f"] Nov 26 08:51:41 crc kubenswrapper[4940]: I1126 08:51:41.225318 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-19da-account-create-update-g2knf"] Nov 26 08:51:41 crc kubenswrapper[4940]: W1126 08:51:41.226025 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb33a04f0_2a5a_4cb0_82f2_d45d62216aa0.slice/crio-5a95920fac347d7b47ada61b1b224cf6e2051cb2f20b3eb339764d21925a6e6d WatchSource:0}: Error finding container 5a95920fac347d7b47ada61b1b224cf6e2051cb2f20b3eb339764d21925a6e6d: Status 404 returned error can't find the container with id 5a95920fac347d7b47ada61b1b224cf6e2051cb2f20b3eb339764d21925a6e6d Nov 26 08:51:41 crc kubenswrapper[4940]: I1126 08:51:41.636051 4940 generic.go:334] "Generic (PLEG): container finished" podID="b33a04f0-2a5a-4cb0-82f2-d45d62216aa0" containerID="0c7c5f5ad9ba4e1c3e7713d00d6247eb6be9d9a846684b84076854608f4f5e34" exitCode=0 Nov 26 08:51:41 crc kubenswrapper[4940]: I1126 08:51:41.636167 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-19da-account-create-update-g2knf" event={"ID":"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0","Type":"ContainerDied","Data":"0c7c5f5ad9ba4e1c3e7713d00d6247eb6be9d9a846684b84076854608f4f5e34"} Nov 26 08:51:41 crc kubenswrapper[4940]: I1126 08:51:41.636413 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-19da-account-create-update-g2knf" event={"ID":"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0","Type":"ContainerStarted","Data":"5a95920fac347d7b47ada61b1b224cf6e2051cb2f20b3eb339764d21925a6e6d"} Nov 26 08:51:41 crc kubenswrapper[4940]: I1126 08:51:41.638761 4940 generic.go:334] "Generic (PLEG): container finished" podID="43dbf499-08fa-48d2-baab-dfcf7fac7d9e" containerID="e81e18353732eae8341036de7873c52efffce6b957f67be90a2fb3559a29d1b5" exitCode=0 Nov 26 08:51:41 crc kubenswrapper[4940]: I1126 08:51:41.638804 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xql5f" event={"ID":"43dbf499-08fa-48d2-baab-dfcf7fac7d9e","Type":"ContainerDied","Data":"e81e18353732eae8341036de7873c52efffce6b957f67be90a2fb3559a29d1b5"} Nov 26 08:51:41 crc kubenswrapper[4940]: I1126 08:51:41.638842 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xql5f" event={"ID":"43dbf499-08fa-48d2-baab-dfcf7fac7d9e","Type":"ContainerStarted","Data":"8e1c64ba9983b29eb471346c7bf8888f420b3ed30f64bb6110f1a1dac843d5e4"} Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.096297 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.103483 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.166518 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n84nt\" (UniqueName: \"kubernetes.io/projected/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-kube-api-access-n84nt\") pod \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\" (UID: \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\") " Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.166596 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqlmr\" (UniqueName: \"kubernetes.io/projected/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-kube-api-access-bqlmr\") pod \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\" (UID: \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\") " Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.166623 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-operator-scripts\") pod \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\" (UID: \"43dbf499-08fa-48d2-baab-dfcf7fac7d9e\") " Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.166680 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-operator-scripts\") pod \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\" (UID: \"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0\") " Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.167295 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "43dbf499-08fa-48d2-baab-dfcf7fac7d9e" (UID: "43dbf499-08fa-48d2-baab-dfcf7fac7d9e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.167679 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b33a04f0-2a5a-4cb0-82f2-d45d62216aa0" (UID: "b33a04f0-2a5a-4cb0-82f2-d45d62216aa0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.168329 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.168348 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.171818 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-kube-api-access-bqlmr" (OuterVolumeSpecName: "kube-api-access-bqlmr") pod "b33a04f0-2a5a-4cb0-82f2-d45d62216aa0" (UID: "b33a04f0-2a5a-4cb0-82f2-d45d62216aa0"). InnerVolumeSpecName "kube-api-access-bqlmr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.172502 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-kube-api-access-n84nt" (OuterVolumeSpecName: "kube-api-access-n84nt") pod "43dbf499-08fa-48d2-baab-dfcf7fac7d9e" (UID: "43dbf499-08fa-48d2-baab-dfcf7fac7d9e"). InnerVolumeSpecName "kube-api-access-n84nt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.269658 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n84nt\" (UniqueName: \"kubernetes.io/projected/43dbf499-08fa-48d2-baab-dfcf7fac7d9e-kube-api-access-n84nt\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.269691 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqlmr\" (UniqueName: \"kubernetes.io/projected/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0-kube-api-access-bqlmr\") on node \"crc\" DevicePath \"\"" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.666907 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-19da-account-create-update-g2knf" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.666920 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-19da-account-create-update-g2knf" event={"ID":"b33a04f0-2a5a-4cb0-82f2-d45d62216aa0","Type":"ContainerDied","Data":"5a95920fac347d7b47ada61b1b224cf6e2051cb2f20b3eb339764d21925a6e6d"} Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.667605 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a95920fac347d7b47ada61b1b224cf6e2051cb2f20b3eb339764d21925a6e6d" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.668759 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xql5f" event={"ID":"43dbf499-08fa-48d2-baab-dfcf7fac7d9e","Type":"ContainerDied","Data":"8e1c64ba9983b29eb471346c7bf8888f420b3ed30f64bb6110f1a1dac843d5e4"} Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.668797 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e1c64ba9983b29eb471346c7bf8888f420b3ed30f64bb6110f1a1dac843d5e4" Nov 26 08:51:43 crc kubenswrapper[4940]: I1126 08:51:43.668851 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-xql5f" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.748376 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-m9v5m"] Nov 26 08:51:45 crc kubenswrapper[4940]: E1126 08:51:45.748962 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43dbf499-08fa-48d2-baab-dfcf7fac7d9e" containerName="mariadb-database-create" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.748976 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="43dbf499-08fa-48d2-baab-dfcf7fac7d9e" containerName="mariadb-database-create" Nov 26 08:51:45 crc kubenswrapper[4940]: E1126 08:51:45.749003 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b33a04f0-2a5a-4cb0-82f2-d45d62216aa0" containerName="mariadb-account-create-update" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.749010 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b33a04f0-2a5a-4cb0-82f2-d45d62216aa0" containerName="mariadb-account-create-update" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.749219 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b33a04f0-2a5a-4cb0-82f2-d45d62216aa0" containerName="mariadb-account-create-update" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.749243 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="43dbf499-08fa-48d2-baab-dfcf7fac7d9e" containerName="mariadb-database-create" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.749901 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.753774 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-464tg" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.753879 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.753913 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.760274 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-m9v5m"] Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.820748 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-db-sync-config-data\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.820814 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c9f01f4-966a-4757-9bc1-a7097089f833-etc-machine-id\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.820841 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-config-data\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.820876 4940 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-scripts\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.820905 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjx82\" (UniqueName: \"kubernetes.io/projected/8c9f01f4-966a-4757-9bc1-a7097089f833-kube-api-access-kjx82\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.820964 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-combined-ca-bundle\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.922497 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-db-sync-config-data\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.922606 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c9f01f4-966a-4757-9bc1-a7097089f833-etc-machine-id\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.922639 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-config-data\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.922685 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-scripts\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.922727 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjx82\" (UniqueName: \"kubernetes.io/projected/8c9f01f4-966a-4757-9bc1-a7097089f833-kube-api-access-kjx82\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.922806 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-combined-ca-bundle\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.924102 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/8c9f01f4-966a-4757-9bc1-a7097089f833-etc-machine-id\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.931054 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-scripts\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.931177 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-combined-ca-bundle\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.931434 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-config-data\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.932313 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-db-sync-config-data\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:45 crc kubenswrapper[4940]: I1126 08:51:45.945216 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjx82\" (UniqueName: \"kubernetes.io/projected/8c9f01f4-966a-4757-9bc1-a7097089f833-kube-api-access-kjx82\") pod \"cinder-db-sync-m9v5m\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") " pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:46 crc kubenswrapper[4940]: I1126 08:51:46.078013 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:51:46 crc kubenswrapper[4940]: W1126 08:51:46.586859 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c9f01f4_966a_4757_9bc1_a7097089f833.slice/crio-32a996f73a0156482c60708e12f37192272c0cdddd25019c7b99428352ba7bea WatchSource:0}: Error finding container 32a996f73a0156482c60708e12f37192272c0cdddd25019c7b99428352ba7bea: Status 404 returned error can't find the container with id 32a996f73a0156482c60708e12f37192272c0cdddd25019c7b99428352ba7bea Nov 26 08:51:46 crc kubenswrapper[4940]: I1126 08:51:46.588463 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-m9v5m"] Nov 26 08:51:46 crc kubenswrapper[4940]: I1126 08:51:46.719468 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-m9v5m" event={"ID":"8c9f01f4-966a-4757-9bc1-a7097089f833","Type":"ContainerStarted","Data":"32a996f73a0156482c60708e12f37192272c0cdddd25019c7b99428352ba7bea"} Nov 26 08:52:06 crc kubenswrapper[4940]: E1126 08:52:06.089705 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:1f5c0439f2433cb462b222a5bb23e629" Nov 26 08:52:06 crc kubenswrapper[4940]: E1126 08:52:06.090366 4940 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:1f5c0439f2433cb462b222a5bb23e629" Nov 26 08:52:06 crc kubenswrapper[4940]: E1126 08:52:06.090569 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kjx82,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-m9v5m_openstack(8c9f01f4-966a-4757-9bc1-a7097089f833): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 08:52:06 crc kubenswrapper[4940]: E1126 08:52:06.091950 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-m9v5m" podUID="8c9f01f4-966a-4757-9bc1-a7097089f833" Nov 26 08:52:06 crc kubenswrapper[4940]: E1126 08:52:06.913936 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:1f5c0439f2433cb462b222a5bb23e629\\\"\"" pod="openstack/cinder-db-sync-m9v5m" podUID="8c9f01f4-966a-4757-9bc1-a7097089f833" Nov 26 08:52:20 crc kubenswrapper[4940]: I1126 08:52:20.074743 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-m9v5m" event={"ID":"8c9f01f4-966a-4757-9bc1-a7097089f833","Type":"ContainerStarted","Data":"906fd0da694e69586e5e97614105f62417217503434eb4be6ef6c6727c84261a"} Nov 26 08:52:20 crc kubenswrapper[4940]: I1126 08:52:20.106304 4940 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-m9v5m" podStartSLOduration=2.277006749 podStartE2EDuration="35.106271706s" podCreationTimestamp="2025-11-26 08:51:45 +0000 UTC" firstStartedPulling="2025-11-26 08:51:46.589672386 +0000 UTC m=+7008.109814005" lastFinishedPulling="2025-11-26 08:52:19.418937333 +0000 UTC m=+7040.939078962" observedRunningTime="2025-11-26 08:52:20.099001325 +0000 UTC m=+7041.619142954" watchObservedRunningTime="2025-11-26 08:52:20.106271706 +0000 UTC m=+7041.626413365"
Nov 26 08:52:23 crc kubenswrapper[4940]: I1126 08:52:23.106563 4940 generic.go:334] "Generic (PLEG): container finished" podID="8c9f01f4-966a-4757-9bc1-a7097089f833" containerID="906fd0da694e69586e5e97614105f62417217503434eb4be6ef6c6727c84261a" exitCode=0
Nov 26 08:52:23 crc kubenswrapper[4940]: I1126 08:52:23.106838 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-m9v5m" event={"ID":"8c9f01f4-966a-4757-9bc1-a7097089f833","Type":"ContainerDied","Data":"906fd0da694e69586e5e97614105f62417217503434eb4be6ef6c6727c84261a"}
Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.533272 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-m9v5m"
Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.554914 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-scripts\") pod \"8c9f01f4-966a-4757-9bc1-a7097089f833\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") "
Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.554988 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjx82\" (UniqueName: \"kubernetes.io/projected/8c9f01f4-966a-4757-9bc1-a7097089f833-kube-api-access-kjx82\") pod \"8c9f01f4-966a-4757-9bc1-a7097089f833\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") "
Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.555028 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-db-sync-config-data\") pod \"8c9f01f4-966a-4757-9bc1-a7097089f833\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") "
Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.555137 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-config-data\") pod \"8c9f01f4-966a-4757-9bc1-a7097089f833\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") "
Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.555176 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c9f01f4-966a-4757-9bc1-a7097089f833-etc-machine-id\") pod \"8c9f01f4-966a-4757-9bc1-a7097089f833\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") "
Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.555212 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-combined-ca-bundle\") pod \"8c9f01f4-966a-4757-9bc1-a7097089f833\" (UID: \"8c9f01f4-966a-4757-9bc1-a7097089f833\") "
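Three things are visible in the span above. First, the initial pull of the cinder image was aborted with "context canceled", so the kubelet recorded ErrImagePull and then ImagePullBackOff; pulls are retried with exponential back-off (by default starting around 10s and doubling up to a 5m cap), and the retry finished at 08:52:19, after which the container started. Second, the pod_startup_latency_tracker entry separates pull time from startup time: podStartSLOduration is podStartE2EDuration (watchObservedRunningTime minus podCreationTimestamp) minus the pull window (lastFinishedPulling minus firstStartedPulling). A short check of that arithmetic, using the timestamps logged above:

package main

import (
	"fmt"
	"time"
)

// Checks the pod_startup_latency_tracker arithmetic from the entry above:
// podStartSLOduration = podStartE2EDuration - time spent pulling the image.
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-11-26 08:51:45 +0000 UTC")             // podCreationTimestamp
	firstPull := parse("2025-11-26 08:51:46.589672386 +0000 UTC") // firstStartedPulling
	lastPull := parse("2025-11-26 08:52:19.418937333 +0000 UTC")  // lastFinishedPulling
	running := parse("2025-11-26 08:52:20.106271706 +0000 UTC")   // watchObservedRunningTime

	e2e := running.Sub(created)     // ~35.106271706s, matches podStartE2EDuration
	pull := lastPull.Sub(firstPull) // the ImagePullBackOff window
	fmt.Println("podStartSLOduration ~=", e2e-pull) // ~2.2770s, vs logged 2.277006749
}

(The tracker subtracts the monotonic m=+ readings, so the last digits differ slightly from this wall-clock subtraction.) Third, once the one-shot db-sync container exits with exitCode=0, the reconciler immediately begins unmounting the pod's volumes, mirroring the earlier mount sequence in reverse.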
volume "kubernetes.io/host-path/8c9f01f4-966a-4757-9bc1-a7097089f833-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8c9f01f4-966a-4757-9bc1-a7097089f833" (UID: "8c9f01f4-966a-4757-9bc1-a7097089f833"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.562622 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c9f01f4-966a-4757-9bc1-a7097089f833-kube-api-access-kjx82" (OuterVolumeSpecName: "kube-api-access-kjx82") pod "8c9f01f4-966a-4757-9bc1-a7097089f833" (UID: "8c9f01f4-966a-4757-9bc1-a7097089f833"). InnerVolumeSpecName "kube-api-access-kjx82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.563654 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-scripts" (OuterVolumeSpecName: "scripts") pod "8c9f01f4-966a-4757-9bc1-a7097089f833" (UID: "8c9f01f4-966a-4757-9bc1-a7097089f833"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.567498 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "8c9f01f4-966a-4757-9bc1-a7097089f833" (UID: "8c9f01f4-966a-4757-9bc1-a7097089f833"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.599412 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c9f01f4-966a-4757-9bc1-a7097089f833" (UID: "8c9f01f4-966a-4757-9bc1-a7097089f833"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.616442 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-config-data" (OuterVolumeSpecName: "config-data") pod "8c9f01f4-966a-4757-9bc1-a7097089f833" (UID: "8c9f01f4-966a-4757-9bc1-a7097089f833"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.657903 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.657948 4940 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c9f01f4-966a-4757-9bc1-a7097089f833-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.657969 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.657985 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.658002 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjx82\" (UniqueName: \"kubernetes.io/projected/8c9f01f4-966a-4757-9bc1-a7097089f833-kube-api-access-kjx82\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:24 crc kubenswrapper[4940]: I1126 08:52:24.658020 4940 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8c9f01f4-966a-4757-9bc1-a7097089f833-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.127603 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-m9v5m" event={"ID":"8c9f01f4-966a-4757-9bc1-a7097089f833","Type":"ContainerDied","Data":"32a996f73a0156482c60708e12f37192272c0cdddd25019c7b99428352ba7bea"} Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.127661 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32a996f73a0156482c60708e12f37192272c0cdddd25019c7b99428352ba7bea" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.127672 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-m9v5m" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.543285 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b8dd447d7-6lp69"] Nov 26 08:52:25 crc kubenswrapper[4940]: E1126 08:52:25.545208 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c9f01f4-966a-4757-9bc1-a7097089f833" containerName="cinder-db-sync" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.545227 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c9f01f4-966a-4757-9bc1-a7097089f833" containerName="cinder-db-sync" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.545397 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c9f01f4-966a-4757-9bc1-a7097089f833" containerName="cinder-db-sync" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.546327 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.563234 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b8dd447d7-6lp69"] Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.574345 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2khsh\" (UniqueName: \"kubernetes.io/projected/26c48474-6fec-46fb-8dd8-64d5c941f5cc-kube-api-access-2khsh\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.574593 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-config\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.574610 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.574655 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.574718 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-dns-svc\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.675688 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-config\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.675730 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.675788 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.675849 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-dns-svc\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.675896 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2khsh\" (UniqueName: \"kubernetes.io/projected/26c48474-6fec-46fb-8dd8-64d5c941f5cc-kube-api-access-2khsh\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.676616 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-config\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.676925 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-dns-svc\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.677132 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.677197 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.715494 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2khsh\" (UniqueName: \"kubernetes.io/projected/26c48474-6fec-46fb-8dd8-64d5c941f5cc-kube-api-access-2khsh\") pod \"dnsmasq-dns-6b8dd447d7-6lp69\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.779480 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.780928 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.783955 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.784192 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-464tg" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.784443 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.784584 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.788868 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.869509 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.981085 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data-custom\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.981406 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.981442 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-scripts\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.981472 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd36bbc0-38d1-4086-9493-e20bc64a60d0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.981495 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.981532 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7t7b\" (UniqueName: \"kubernetes.io/projected/dd36bbc0-38d1-4086-9493-e20bc64a60d0-kube-api-access-v7t7b\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:25 crc kubenswrapper[4940]: I1126 08:52:25.981555 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/dd36bbc0-38d1-4086-9493-e20bc64a60d0-logs\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.082867 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7t7b\" (UniqueName: \"kubernetes.io/projected/dd36bbc0-38d1-4086-9493-e20bc64a60d0-kube-api-access-v7t7b\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.082920 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd36bbc0-38d1-4086-9493-e20bc64a60d0-logs\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.083000 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data-custom\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.083066 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.083111 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-scripts\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.083149 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd36bbc0-38d1-4086-9493-e20bc64a60d0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.083178 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.090532 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd36bbc0-38d1-4086-9493-e20bc64a60d0-logs\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.090603 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd36bbc0-38d1-4086-9493-e20bc64a60d0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.113628 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.117470 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-scripts\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.127832 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.128601 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data-custom\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.140691 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7t7b\" (UniqueName: \"kubernetes.io/projected/dd36bbc0-38d1-4086-9493-e20bc64a60d0-kube-api-access-v7t7b\") pod \"cinder-api-0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.399083 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.411596 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b8dd447d7-6lp69"] Nov 26 08:52:26 crc kubenswrapper[4940]: I1126 08:52:26.907298 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 08:52:27 crc kubenswrapper[4940]: I1126 08:52:27.195763 4940 generic.go:334] "Generic (PLEG): container finished" podID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" containerID="8b9626e8993254c771599f187cc3b92b4c14f52e6730f9df88b5c8953dd69b7e" exitCode=0 Nov 26 08:52:27 crc kubenswrapper[4940]: I1126 08:52:27.195992 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" event={"ID":"26c48474-6fec-46fb-8dd8-64d5c941f5cc","Type":"ContainerDied","Data":"8b9626e8993254c771599f187cc3b92b4c14f52e6730f9df88b5c8953dd69b7e"} Nov 26 08:52:27 crc kubenswrapper[4940]: I1126 08:52:27.196084 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" event={"ID":"26c48474-6fec-46fb-8dd8-64d5c941f5cc","Type":"ContainerStarted","Data":"3910b400caf914495146972c200e07df8a20f6e0d0dfc94b660f6c0da4f8c303"} Nov 26 08:52:27 crc kubenswrapper[4940]: I1126 08:52:27.199317 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dd36bbc0-38d1-4086-9493-e20bc64a60d0","Type":"ContainerStarted","Data":"673707b5d9df8bb18c85876ebd36f525c45b55ff9396b23661f5547d4758c1e7"} Nov 26 08:52:28 crc kubenswrapper[4940]: I1126 08:52:28.214154 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dd36bbc0-38d1-4086-9493-e20bc64a60d0","Type":"ContainerStarted","Data":"639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3"} Nov 26 08:52:28 crc 
kubenswrapper[4940]: I1126 08:52:28.214768 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dd36bbc0-38d1-4086-9493-e20bc64a60d0","Type":"ContainerStarted","Data":"54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d"}
Nov 26 08:52:28 crc kubenswrapper[4940]: I1126 08:52:28.214817 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 26 08:52:28 crc kubenswrapper[4940]: I1126 08:52:28.219167 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" event={"ID":"26c48474-6fec-46fb-8dd8-64d5c941f5cc","Type":"ContainerStarted","Data":"7f0ecccbce780f2ce5f44411070e4ccdad15455ae896bf8a222cd994dcbecf7a"}
Nov 26 08:52:28 crc kubenswrapper[4940]: I1126 08:52:28.219343 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69"
Nov 26 08:52:28 crc kubenswrapper[4940]: I1126 08:52:28.246842 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.246804664 podStartE2EDuration="3.246804664s" podCreationTimestamp="2025-11-26 08:52:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:52:28.236449956 +0000 UTC m=+7049.756591585" watchObservedRunningTime="2025-11-26 08:52:28.246804664 +0000 UTC m=+7049.766946323"
Nov 26 08:52:28 crc kubenswrapper[4940]: I1126 08:52:28.261858 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" podStartSLOduration=3.261839243 podStartE2EDuration="3.261839243s" podCreationTimestamp="2025-11-26 08:52:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:52:28.255681147 +0000 UTC m=+7049.775822776" watchObservedRunningTime="2025-11-26 08:52:28.261839243 +0000 UTC m=+7049.781980872"
Nov 26 08:52:35 crc kubenswrapper[4940]: I1126 08:52:35.871236 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69"
Nov 26 08:52:35 crc kubenswrapper[4940]: I1126 08:52:35.934091 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66cd6ddd57-zxnqx"]
Nov 26 08:52:35 crc kubenswrapper[4940]: I1126 08:52:35.934450 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" podUID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" containerName="dnsmasq-dns" containerID="cri-o://8ec56c4a91170dd3daf40da315ed56e1a0d744f2d2109451e6dec2dfbe053f3b" gracePeriod=10
Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.303504 4940 generic.go:334] "Generic (PLEG): container finished" podID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" containerID="8ec56c4a91170dd3daf40da315ed56e1a0d744f2d2109451e6dec2dfbe053f3b" exitCode=0
Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.303757 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" event={"ID":"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc","Type":"ContainerDied","Data":"8ec56c4a91170dd3daf40da315ed56e1a0d744f2d2109451e6dec2dfbe053f3b"}
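The DELETE of the superseded dnsmasq-dns-66cd6ddd57-zxnqx pod above shows the standard termination handshake: "Killing container with a grace period" with gracePeriod=10 means the runtime delivers SIGTERM and escalates to SIGKILL only if the container is still running when the grace period expires; dnsmasq exits promptly, hence exitCode=0. A minimal sketch of that escalation against a local process (illustrative only, not CRI-O's implementation):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// Sketch of the SIGTERM-then-SIGKILL escalation behind
// "Killing container with a grace period".
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM) // polite stop request
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		_ = cmd.Process.Kill() // SIGKILL once the grace period elapses
		<-done
		fmt.Println("hard-killed after grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "300")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 10*time.Second)
}

The nova containers killed below get gracePeriod=30, and the exitCode=143 entries further down are 128+15, i.e. processes that terminated on the SIGTERM itself rather than being hard-killed.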
Need to start a new one" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.579678 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-dns-svc\") pod \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.580073 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-config\") pod \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.580209 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tvmm\" (UniqueName: \"kubernetes.io/projected/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-kube-api-access-6tvmm\") pod \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.580240 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-sb\") pod \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.580297 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-nb\") pod \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\" (UID: \"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc\") " Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.589293 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-kube-api-access-6tvmm" (OuterVolumeSpecName: "kube-api-access-6tvmm") pod "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" (UID: "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc"). InnerVolumeSpecName "kube-api-access-6tvmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.629012 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" (UID: "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.650633 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" (UID: "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.656641 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" (UID: "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.660936 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-config" (OuterVolumeSpecName: "config") pod "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" (UID: "5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.684303 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.684340 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tvmm\" (UniqueName: \"kubernetes.io/projected/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-kube-api-access-6tvmm\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.684356 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.684367 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:36 crc kubenswrapper[4940]: I1126 08:52:36.684379 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.313832 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" event={"ID":"5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc","Type":"ContainerDied","Data":"1c87691ac8081cf3af4b890456e385220c51b37ccecbf4825e03801bd5c2b192"} Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.313881 4940 scope.go:117] "RemoveContainer" containerID="8ec56c4a91170dd3daf40da315ed56e1a0d744f2d2109451e6dec2dfbe053f3b" Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.314003 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66cd6ddd57-zxnqx" Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.338367 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66cd6ddd57-zxnqx"] Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.345839 4940 scope.go:117] "RemoveContainer" containerID="681127c380e6dd6dfd7a565884ae9d032f2c248b36d384127c76696ab5379b11" Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.346432 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66cd6ddd57-zxnqx"] Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.508893 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.509718 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="ccdda4af-b9bc-4225-9310-b54709e7ee09" containerName="nova-cell0-conductor-conductor" containerID="cri-o://96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02" gracePeriod=30 Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.550305 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.550611 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-log" containerID="cri-o://1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07" gracePeriod=30 Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.550757 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-metadata" containerID="cri-o://7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3" gracePeriod=30 Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.571332 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.571546 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="84b7d511-58d4-46ef-af52-4dc808018a92" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://99e0a0c536159ce4f1407628eab4e22fda6bca1599755a9aeeae19b51556db79" gracePeriod=30 Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.582359 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.582559 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="849696c1-5809-4976-a4cc-b05f4432d07b" containerName="nova-scheduler-scheduler" containerID="cri-o://2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c" gracePeriod=30 Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.592569 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 08:52:37.592807 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-log" containerID="cri-o://23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73" gracePeriod=30 Nov 26 08:52:37 crc kubenswrapper[4940]: I1126 
08:52:37.592838 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-api" containerID="cri-o://d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78" gracePeriod=30
Nov 26 08:52:37 crc kubenswrapper[4940]: E1126 08:52:37.768407 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 26 08:52:37 crc kubenswrapper[4940]: E1126 08:52:37.769788 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 26 08:52:37 crc kubenswrapper[4940]: E1126 08:52:37.771178 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 26 08:52:37 crc kubenswrapper[4940]: E1126 08:52:37.771208 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="849696c1-5809-4976-a4cc-b05f4432d07b" containerName="nova-scheduler-scheduler"
Nov 26 08:52:38 crc kubenswrapper[4940]: E1126 08:52:38.154501 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 26 08:52:38 crc kubenswrapper[4940]: E1126 08:52:38.156651 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 26 08:52:38 crc kubenswrapper[4940]: E1126 08:52:38.159970 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 26 08:52:38 crc kubenswrapper[4940]: E1126 08:52:38.160056 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="ccdda4af-b9bc-4225-9310-b54709e7ee09" containerName="nova-cell0-conductor-conductor"
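The repeated "ExecSync cmd from runtime service failed" errors above are the readiness probes of nova-scheduler and the cell0 conductor racing their own termination: each probe execs /usr/bin/pgrep -r DRST <name> inside the container (pgrep's --runstates filter, matching processes in state D, R, S or T), and the runtime refuses to register a new exec PID in a container that is already stopping, so the prober reports "Probe errored" until the container is gone. A sketch of that probe's logic as a standalone check (hypothetical wrapper, not the kubelet prober):

package main

import (
	"fmt"
	"os/exec"
)

// Sketch of the exec-style readiness check the probes above run:
// `pgrep -r DRST <name>` exits 0 only if a matching process exists in
// run state D, R, S or T; exit status 0 maps to "ready".
func ready(process string) bool {
	err := exec.Command("/usr/bin/pgrep", "-r", "DRST", process).Run()
	return err == nil
}

func main() {
	fmt.Println("nova-scheduler ready:", ready("nova-scheduler"))
}

These errors are shutdown noise rather than an independent failure; the ContainerDied events that follow are the expected outcome of the DELETEs above.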
podID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerID="1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07" exitCode=143 Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.323108 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"94022cb9-d378-4c8e-b06c-1cd527c59638","Type":"ContainerDied","Data":"1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07"} Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.325940 4940 generic.go:334] "Generic (PLEG): container finished" podID="ac0a1949-6143-4903-a56f-a513aad540e4" containerID="23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73" exitCode=143 Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.326007 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac0a1949-6143-4903-a56f-a513aad540e4","Type":"ContainerDied","Data":"23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73"} Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.328738 4940 generic.go:334] "Generic (PLEG): container finished" podID="84b7d511-58d4-46ef-af52-4dc808018a92" containerID="99e0a0c536159ce4f1407628eab4e22fda6bca1599755a9aeeae19b51556db79" exitCode=0 Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.328779 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"84b7d511-58d4-46ef-af52-4dc808018a92","Type":"ContainerDied","Data":"99e0a0c536159ce4f1407628eab4e22fda6bca1599755a9aeeae19b51556db79"} Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.328800 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"84b7d511-58d4-46ef-af52-4dc808018a92","Type":"ContainerDied","Data":"eeb54f9ecc8471316aa2d5c811d7087615ae6bb4e2036956505e8f7b5ac40213"} Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.328812 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eeb54f9ecc8471316aa2d5c811d7087615ae6bb4e2036956505e8f7b5ac40213" Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.364543 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.392860 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.512874 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-combined-ca-bundle\") pod \"84b7d511-58d4-46ef-af52-4dc808018a92\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.512938 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-config-data\") pod \"84b7d511-58d4-46ef-af52-4dc808018a92\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.513000 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9f4v\" (UniqueName: \"kubernetes.io/projected/84b7d511-58d4-46ef-af52-4dc808018a92-kube-api-access-s9f4v\") pod \"84b7d511-58d4-46ef-af52-4dc808018a92\" (UID: \"84b7d511-58d4-46ef-af52-4dc808018a92\") " Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.519490 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84b7d511-58d4-46ef-af52-4dc808018a92-kube-api-access-s9f4v" (OuterVolumeSpecName: "kube-api-access-s9f4v") pod "84b7d511-58d4-46ef-af52-4dc808018a92" (UID: "84b7d511-58d4-46ef-af52-4dc808018a92"). InnerVolumeSpecName "kube-api-access-s9f4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.542020 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-config-data" (OuterVolumeSpecName: "config-data") pod "84b7d511-58d4-46ef-af52-4dc808018a92" (UID: "84b7d511-58d4-46ef-af52-4dc808018a92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.546085 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84b7d511-58d4-46ef-af52-4dc808018a92" (UID: "84b7d511-58d4-46ef-af52-4dc808018a92"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.615644 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.615686 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84b7d511-58d4-46ef-af52-4dc808018a92-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:38 crc kubenswrapper[4940]: I1126 08:52:38.615695 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9f4v\" (UniqueName: \"kubernetes.io/projected/84b7d511-58d4-46ef-af52-4dc808018a92-kube-api-access-s9f4v\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.175094 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" path="/var/lib/kubelet/pods/5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc/volumes" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.342776 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.380814 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.390479 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.399027 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:52:39 crc kubenswrapper[4940]: E1126 08:52:39.399514 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84b7d511-58d4-46ef-af52-4dc808018a92" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.399536 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="84b7d511-58d4-46ef-af52-4dc808018a92" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 08:52:39 crc kubenswrapper[4940]: E1126 08:52:39.399584 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" containerName="init" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.399593 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" containerName="init" Nov 26 08:52:39 crc kubenswrapper[4940]: E1126 08:52:39.399607 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" containerName="dnsmasq-dns" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.399615 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" containerName="dnsmasq-dns" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.399878 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="84b7d511-58d4-46ef-af52-4dc808018a92" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.399928 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bbbe50b-2ffe-44b6-b1b8-07ca6634eacc" containerName="dnsmasq-dns" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.400780 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.407352 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.407416 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.531689 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1710b60-49d1-4953-8bbc-c79734c49f71-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.531746 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s67kt\" (UniqueName: \"kubernetes.io/projected/f1710b60-49d1-4953-8bbc-c79734c49f71-kube-api-access-s67kt\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.531818 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1710b60-49d1-4953-8bbc-c79734c49f71-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.633282 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1710b60-49d1-4953-8bbc-c79734c49f71-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.633343 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s67kt\" (UniqueName: \"kubernetes.io/projected/f1710b60-49d1-4953-8bbc-c79734c49f71-kube-api-access-s67kt\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.633382 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1710b60-49d1-4953-8bbc-c79734c49f71-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.651255 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1710b60-49d1-4953-8bbc-c79734c49f71-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.651428 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1710b60-49d1-4953-8bbc-c79734c49f71-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.655177 
4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s67kt\" (UniqueName: \"kubernetes.io/projected/f1710b60-49d1-4953-8bbc-c79734c49f71-kube-api-access-s67kt\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1710b60-49d1-4953-8bbc-c79734c49f71\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:39 crc kubenswrapper[4940]: I1126 08:52:39.730291 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 08:52:40 crc kubenswrapper[4940]: I1126 08:52:40.187713 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 08:52:40 crc kubenswrapper[4940]: I1126 08:52:40.364382 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f1710b60-49d1-4953-8bbc-c79734c49f71","Type":"ContainerStarted","Data":"5667a745628a61f65ac09449cdb506e94d380a057da0d9112e496645f127d013"} Nov 26 08:52:40 crc kubenswrapper[4940]: I1126 08:52:40.806525 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 08:52:40 crc kubenswrapper[4940]: I1126 08:52:40.806722 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="ad7344d8-55c3-4025-8cba-b5919e8d42a8" containerName="nova-cell1-conductor-conductor" containerID="cri-o://9ac04ec0ad7a154edb242afdfe0ebc6daba5068e91dae9281ac6765a664f107c" gracePeriod=30 Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.176508 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84b7d511-58d4-46ef-af52-4dc808018a92" path="/var/lib/kubelet/pods/84b7d511-58d4-46ef-af52-4dc808018a92/volumes" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.195048 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.265692 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle\") pod \"94022cb9-d378-4c8e-b06c-1cd527c59638\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.265756 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f74mk\" (UniqueName: \"kubernetes.io/projected/94022cb9-d378-4c8e-b06c-1cd527c59638-kube-api-access-f74mk\") pod \"94022cb9-d378-4c8e-b06c-1cd527c59638\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.265891 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94022cb9-d378-4c8e-b06c-1cd527c59638-logs\") pod \"94022cb9-d378-4c8e-b06c-1cd527c59638\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.265925 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-config-data\") pod \"94022cb9-d378-4c8e-b06c-1cd527c59638\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.269908 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94022cb9-d378-4c8e-b06c-1cd527c59638-logs" (OuterVolumeSpecName: "logs") pod "94022cb9-d378-4c8e-b06c-1cd527c59638" (UID: "94022cb9-d378-4c8e-b06c-1cd527c59638"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.284957 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94022cb9-d378-4c8e-b06c-1cd527c59638-kube-api-access-f74mk" (OuterVolumeSpecName: "kube-api-access-f74mk") pod "94022cb9-d378-4c8e-b06c-1cd527c59638" (UID: "94022cb9-d378-4c8e-b06c-1cd527c59638"). InnerVolumeSpecName "kube-api-access-f74mk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.314958 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.337348 4940 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle podName:94022cb9-d378-4c8e-b06c-1cd527c59638 nodeName:}" failed. No retries permitted until 2025-11-26 08:52:41.837317791 +0000 UTC m=+7063.357459410 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle") pod "94022cb9-d378-4c8e-b06c-1cd527c59638" (UID: "94022cb9-d378-4c8e-b06c-1cd527c59638") : error deleting /var/lib/kubelet/pods/94022cb9-d378-4c8e-b06c-1cd527c59638/volume-subpaths: remove /var/lib/kubelet/pods/94022cb9-d378-4c8e-b06c-1cd527c59638/volume-subpaths: no such file or directory Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.363198 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-config-data" (OuterVolumeSpecName: "config-data") pod "94022cb9-d378-4c8e-b06c-1cd527c59638" (UID: "94022cb9-d378-4c8e-b06c-1cd527c59638"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.370725 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0a1949-6143-4903-a56f-a513aad540e4-logs\") pod \"ac0a1949-6143-4903-a56f-a513aad540e4\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.370805 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-config-data\") pod \"ac0a1949-6143-4903-a56f-a513aad540e4\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.370851 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnnt6\" (UniqueName: \"kubernetes.io/projected/ac0a1949-6143-4903-a56f-a513aad540e4-kube-api-access-tnnt6\") pod \"ac0a1949-6143-4903-a56f-a513aad540e4\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.370893 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-combined-ca-bundle\") pod \"ac0a1949-6143-4903-a56f-a513aad540e4\" (UID: \"ac0a1949-6143-4903-a56f-a513aad540e4\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.371330 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94022cb9-d378-4c8e-b06c-1cd527c59638-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.371343 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.371352 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f74mk\" (UniqueName: \"kubernetes.io/projected/94022cb9-d378-4c8e-b06c-1cd527c59638-kube-api-access-f74mk\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.377466 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac0a1949-6143-4903-a56f-a513aad540e4-logs" (OuterVolumeSpecName: "logs") pod "ac0a1949-6143-4903-a56f-a513aad540e4" (UID: "ac0a1949-6143-4903-a56f-a513aad540e4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.389279 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac0a1949-6143-4903-a56f-a513aad540e4-kube-api-access-tnnt6" (OuterVolumeSpecName: "kube-api-access-tnnt6") pod "ac0a1949-6143-4903-a56f-a513aad540e4" (UID: "ac0a1949-6143-4903-a56f-a513aad540e4"). InnerVolumeSpecName "kube-api-access-tnnt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.395775 4940 generic.go:334] "Generic (PLEG): container finished" podID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerID="7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3" exitCode=0 Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.395834 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"94022cb9-d378-4c8e-b06c-1cd527c59638","Type":"ContainerDied","Data":"7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3"} Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.395858 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"94022cb9-d378-4c8e-b06c-1cd527c59638","Type":"ContainerDied","Data":"868d1d57acb5bd3a5712667a1bc9f1e7532e7fe4c905024561fb732814a232ce"} Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.395875 4940 scope.go:117] "RemoveContainer" containerID="7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.395988 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.434372 4940 generic.go:334] "Generic (PLEG): container finished" podID="ac0a1949-6143-4903-a56f-a513aad540e4" containerID="d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78" exitCode=0 Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.434509 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.434539 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac0a1949-6143-4903-a56f-a513aad540e4","Type":"ContainerDied","Data":"d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78"} Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.434897 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ac0a1949-6143-4903-a56f-a513aad540e4","Type":"ContainerDied","Data":"31076bbefd5c71a3ae5512547a5be368d6fe568b172370f71347ecf9491ad91c"} Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.439354 4940 scope.go:117] "RemoveContainer" containerID="1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.439432 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac0a1949-6143-4903-a56f-a513aad540e4" (UID: "ac0a1949-6143-4903-a56f-a513aad540e4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.440383 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f1710b60-49d1-4953-8bbc-c79734c49f71","Type":"ContainerStarted","Data":"fecc548c0e9cc10e8fbf2be9b45458f88c490ac6877170b28c16040e2aeff4a8"} Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.440380 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-config-data" (OuterVolumeSpecName: "config-data") pod "ac0a1949-6143-4903-a56f-a513aad540e4" (UID: "ac0a1949-6143-4903-a56f-a513aad540e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.463524 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.463508643 podStartE2EDuration="2.463508643s" podCreationTimestamp="2025-11-26 08:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:52:41.461250731 +0000 UTC m=+7062.981392350" watchObservedRunningTime="2025-11-26 08:52:41.463508643 +0000 UTC m=+7062.983650262" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.486155 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.486184 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnnt6\" (UniqueName: \"kubernetes.io/projected/ac0a1949-6143-4903-a56f-a513aad540e4-kube-api-access-tnnt6\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.486194 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac0a1949-6143-4903-a56f-a513aad540e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.486204 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ac0a1949-6143-4903-a56f-a513aad540e4-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.488207 4940 scope.go:117] "RemoveContainer" containerID="7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3" Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.488807 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3\": container with ID starting with 7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3 not found: ID does not exist" containerID="7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.488833 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3"} err="failed to get container status \"7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3\": rpc error: code = NotFound desc = could not find container \"7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3\": container with ID starting 
with 7d447b31f67189a2bd5445c27835a98724dc2e20a6050b3f3c0fb2137301c1f3 not found: ID does not exist" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.488853 4940 scope.go:117] "RemoveContainer" containerID="1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07" Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.490243 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07\": container with ID starting with 1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07 not found: ID does not exist" containerID="1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.490262 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07"} err="failed to get container status \"1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07\": rpc error: code = NotFound desc = could not find container \"1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07\": container with ID starting with 1359a7ec90b6df9fcf7bc5693917ab044551ab60978a0bfb2bd4936d9c3a3d07 not found: ID does not exist" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.490275 4940 scope.go:117] "RemoveContainer" containerID="d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.517946 4940 scope.go:117] "RemoveContainer" containerID="23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.550980 4940 scope.go:117] "RemoveContainer" containerID="d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78" Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.551655 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78\": container with ID starting with d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78 not found: ID does not exist" containerID="d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.551696 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78"} err="failed to get container status \"d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78\": rpc error: code = NotFound desc = could not find container \"d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78\": container with ID starting with d7d40dfc7bba827c3d4ac310b044f2eadb9f1cb9da2ea6b05cf6de8e6ada6b78 not found: ID does not exist" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.551721 4940 scope.go:117] "RemoveContainer" containerID="23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73" Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.551997 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73\": container with ID starting with 23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73 not found: ID does not exist" 
containerID="23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.552015 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73"} err="failed to get container status \"23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73\": rpc error: code = NotFound desc = could not find container \"23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73\": container with ID starting with 23e0b9314a5b7cbd7d883cf6b2655cf3782144bbfd58c25cbf195752e2412e73 not found: ID does not exist" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.769443 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.788273 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.800942 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.801452 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-api" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.801472 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-api" Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.801492 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-log" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.801500 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-log" Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.801529 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-metadata" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.801535 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-metadata" Nov 26 08:52:41 crc kubenswrapper[4940]: E1126 08:52:41.801549 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-log" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.801555 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-log" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.801726 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-metadata" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.801759 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-api" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.801771 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" containerName="nova-api-log" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.801783 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" containerName="nova-metadata-log" Nov 26 08:52:41 crc 
kubenswrapper[4940]: I1126 08:52:41.802770 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.805875 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.819229 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.896374 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle\") pod \"94022cb9-d378-4c8e-b06c-1cd527c59638\" (UID: \"94022cb9-d378-4c8e-b06c-1cd527c59638\") " Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.896723 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.896840 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7jjz\" (UniqueName: \"kubernetes.io/projected/912011e6-2802-415f-9e1b-39ddfec0f182-kube-api-access-v7jjz\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.896864 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-config-data\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.896900 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912011e6-2802-415f-9e1b-39ddfec0f182-logs\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:41 crc kubenswrapper[4940]: I1126 08:52:41.916493 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "94022cb9-d378-4c8e-b06c-1cd527c59638" (UID: "94022cb9-d378-4c8e-b06c-1cd527c59638"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:41.999999 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7jjz\" (UniqueName: \"kubernetes.io/projected/912011e6-2802-415f-9e1b-39ddfec0f182-kube-api-access-v7jjz\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.000341 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-config-data\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.000387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912011e6-2802-415f-9e1b-39ddfec0f182-logs\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.000511 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.000602 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94022cb9-d378-4c8e-b06c-1cd527c59638-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.001540 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912011e6-2802-415f-9e1b-39ddfec0f182-logs\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.029275 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7jjz\" (UniqueName: \"kubernetes.io/projected/912011e6-2802-415f-9e1b-39ddfec0f182-kube-api-access-v7jjz\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.044512 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-config-data\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.046914 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") " pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.141180 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.151231 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.166892 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.182393 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.212170 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5wgq\" (UniqueName: \"kubernetes.io/projected/ccdda4af-b9bc-4225-9310-b54709e7ee09-kube-api-access-j5wgq\") pod \"ccdda4af-b9bc-4225-9310-b54709e7ee09\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.212323 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-config-data\") pod \"ccdda4af-b9bc-4225-9310-b54709e7ee09\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.212361 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-combined-ca-bundle\") pod \"ccdda4af-b9bc-4225-9310-b54709e7ee09\" (UID: \"ccdda4af-b9bc-4225-9310-b54709e7ee09\") " Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.222540 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccdda4af-b9bc-4225-9310-b54709e7ee09-kube-api-access-j5wgq" (OuterVolumeSpecName: "kube-api-access-j5wgq") pod "ccdda4af-b9bc-4225-9310-b54709e7ee09" (UID: "ccdda4af-b9bc-4225-9310-b54709e7ee09"). InnerVolumeSpecName "kube-api-access-j5wgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.239486 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: E1126 08:52:42.241473 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccdda4af-b9bc-4225-9310-b54709e7ee09" containerName="nova-cell0-conductor-conductor" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.241530 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccdda4af-b9bc-4225-9310-b54709e7ee09" containerName="nova-cell0-conductor-conductor" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.248167 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-config-data" (OuterVolumeSpecName: "config-data") pod "ccdda4af-b9bc-4225-9310-b54709e7ee09" (UID: "ccdda4af-b9bc-4225-9310-b54709e7ee09"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.248374 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccdda4af-b9bc-4225-9310-b54709e7ee09" containerName="nova-cell0-conductor-conductor" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.249810 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.249903 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.252867 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.262077 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ccdda4af-b9bc-4225-9310-b54709e7ee09" (UID: "ccdda4af-b9bc-4225-9310-b54709e7ee09"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.316161 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-config-data\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.316206 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqtjh\" (UniqueName: \"kubernetes.io/projected/08cf79c6-c1be-4547-b294-f2f7361aa574-kube-api-access-nqtjh\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.316245 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.316289 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cf79c6-c1be-4547-b294-f2f7361aa574-logs\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.316448 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5wgq\" (UniqueName: \"kubernetes.io/projected/ccdda4af-b9bc-4225-9310-b54709e7ee09-kube-api-access-j5wgq\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.316462 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.316471 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccdda4af-b9bc-4225-9310-b54709e7ee09-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.419983 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cf79c6-c1be-4547-b294-f2f7361aa574-logs\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.420442 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-config-data\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.420475 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqtjh\" (UniqueName: \"kubernetes.io/projected/08cf79c6-c1be-4547-b294-f2f7361aa574-kube-api-access-nqtjh\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.420505 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.422924 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cf79c6-c1be-4547-b294-f2f7361aa574-logs\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.424733 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.425617 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-config-data\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.442969 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqtjh\" (UniqueName: \"kubernetes.io/projected/08cf79c6-c1be-4547-b294-f2f7361aa574-kube-api-access-nqtjh\") pod \"nova-metadata-0\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") " pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.470498 4940 generic.go:334] "Generic (PLEG): container finished" podID="ad7344d8-55c3-4025-8cba-b5919e8d42a8" containerID="9ac04ec0ad7a154edb242afdfe0ebc6daba5068e91dae9281ac6765a664f107c" exitCode=0 Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.470571 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ad7344d8-55c3-4025-8cba-b5919e8d42a8","Type":"ContainerDied","Data":"9ac04ec0ad7a154edb242afdfe0ebc6daba5068e91dae9281ac6765a664f107c"} Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.503446 4940 generic.go:334] "Generic (PLEG): container finished" podID="ccdda4af-b9bc-4225-9310-b54709e7ee09" containerID="96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02" exitCode=0 Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.504530 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.508522 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"ccdda4af-b9bc-4225-9310-b54709e7ee09","Type":"ContainerDied","Data":"96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02"} Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.508590 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"ccdda4af-b9bc-4225-9310-b54709e7ee09","Type":"ContainerDied","Data":"671aaecf499d674dd1ac560f38d14fc90f49dc15a01d67479fd56ba59bb4bb8f"} Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.508608 4940 scope.go:117] "RemoveContainer" containerID="96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.541335 4940 scope.go:117] "RemoveContainer" containerID="96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02" Nov 26 08:52:42 crc kubenswrapper[4940]: E1126 08:52:42.543405 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02\": container with ID starting with 96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02 not found: ID does not exist" containerID="96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.543449 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02"} err="failed to get container status \"96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02\": rpc error: code = NotFound desc = could not find container \"96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02\": container with ID starting with 96fb96343cc702f5304764d1b29bc1157d045279f019bfc324d5e90b7f9a9e02 not found: ID does not exist" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.575146 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.576427 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.586986 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.598619 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.599866 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.602182 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.620295 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.724576 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.733230 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.733316 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.733366 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m7wk\" (UniqueName: \"kubernetes.io/projected/16d21285-da91-4a46-8443-7cbdfe13cd1c-kube-api-access-2m7wk\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: E1126 08:52:42.774969 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 08:52:42 crc kubenswrapper[4940]: E1126 08:52:42.776138 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 08:52:42 crc kubenswrapper[4940]: E1126 08:52:42.777345 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 08:52:42 crc kubenswrapper[4940]: E1126 08:52:42.777381 4940 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="849696c1-5809-4976-a4cc-b05f4432d07b" containerName="nova-scheduler-scheduler" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.835850 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.835910 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.835939 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m7wk\" (UniqueName: \"kubernetes.io/projected/16d21285-da91-4a46-8443-7cbdfe13cd1c-kube-api-access-2m7wk\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.840981 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.842291 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.862980 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m7wk\" (UniqueName: \"kubernetes.io/projected/16d21285-da91-4a46-8443-7cbdfe13cd1c-kube-api-access-2m7wk\") pod \"nova-cell0-conductor-0\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.917634 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 08:52:42 crc kubenswrapper[4940]: I1126 08:52:42.960010 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.041528 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-config-data\") pod \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") "
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.041689 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-combined-ca-bundle\") pod \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") "
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.041716 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swm7x\" (UniqueName: \"kubernetes.io/projected/ad7344d8-55c3-4025-8cba-b5919e8d42a8-kube-api-access-swm7x\") pod \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\" (UID: \"ad7344d8-55c3-4025-8cba-b5919e8d42a8\") "
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.059342 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad7344d8-55c3-4025-8cba-b5919e8d42a8-kube-api-access-swm7x" (OuterVolumeSpecName: "kube-api-access-swm7x") pod "ad7344d8-55c3-4025-8cba-b5919e8d42a8" (UID: "ad7344d8-55c3-4025-8cba-b5919e8d42a8"). InnerVolumeSpecName "kube-api-access-swm7x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.088393 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad7344d8-55c3-4025-8cba-b5919e8d42a8" (UID: "ad7344d8-55c3-4025-8cba-b5919e8d42a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.092225 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-config-data" (OuterVolumeSpecName: "config-data") pod "ad7344d8-55c3-4025-8cba-b5919e8d42a8" (UID: "ad7344d8-55c3-4025-8cba-b5919e8d42a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.145387 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.145414 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad7344d8-55c3-4025-8cba-b5919e8d42a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.145424 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swm7x\" (UniqueName: \"kubernetes.io/projected/ad7344d8-55c3-4025-8cba-b5919e8d42a8-kube-api-access-swm7x\") on node \"crc\" DevicePath \"\""
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.150735 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.183021 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94022cb9-d378-4c8e-b06c-1cd527c59638" path="/var/lib/kubelet/pods/94022cb9-d378-4c8e-b06c-1cd527c59638/volumes"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.183908 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac0a1949-6143-4903-a56f-a513aad540e4" path="/var/lib/kubelet/pods/ac0a1949-6143-4903-a56f-a513aad540e4/volumes"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.184487 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccdda4af-b9bc-4225-9310-b54709e7ee09" path="/var/lib/kubelet/pods/ccdda4af-b9bc-4225-9310-b54709e7ee09/volumes"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.449562 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.514342 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"912011e6-2802-415f-9e1b-39ddfec0f182","Type":"ContainerStarted","Data":"87a17963a9354e55ecada023c358dba20d8b3c3d610fe454219ac91a85895250"}
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.514421 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"912011e6-2802-415f-9e1b-39ddfec0f182","Type":"ContainerStarted","Data":"a2602e1a43f7c9a447a7d77ead2b8bda0ab0ab0f32d87126ff38e712bcc5cc84"}
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.514439 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"912011e6-2802-415f-9e1b-39ddfec0f182","Type":"ContainerStarted","Data":"e118e8633cadda7a0b817fd66efc94c3229710c3948d7433ca222091284e85b2"}
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.520416 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"16d21285-da91-4a46-8443-7cbdfe13cd1c","Type":"ContainerStarted","Data":"1c0bccd9ce223ee004a976d5578e54da821dca1960686f19e2cd39dc4c52d905"}
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.521706 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cf79c6-c1be-4547-b294-f2f7361aa574","Type":"ContainerStarted","Data":"8cd4682e79902c32f98b0243d6629777b564b16dd50f6b2c5051a7d77c6c8f27"}
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.521731 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cf79c6-c1be-4547-b294-f2f7361aa574","Type":"ContainerStarted","Data":"ce8478f22df6095a887da974c1f748534dd598159d2e97d96a668d3a9c7f25e4"}
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.524224 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ad7344d8-55c3-4025-8cba-b5919e8d42a8","Type":"ContainerDied","Data":"be3d927028bb1fed4d09852a1071038377a75dee5bfeed0d789e83c84a29e48a"}
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.524249 4940 scope.go:117] "RemoveContainer" containerID="9ac04ec0ad7a154edb242afdfe0ebc6daba5068e91dae9281ac6765a664f107c"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.524341 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.544518 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.544491826 podStartE2EDuration="2.544491826s" podCreationTimestamp="2025-11-26 08:52:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:52:43.53016991 +0000 UTC m=+7065.050311529" watchObservedRunningTime="2025-11-26 08:52:43.544491826 +0000 UTC m=+7065.064633445"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.558797 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.572984 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.590924 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 08:52:43 crc kubenswrapper[4940]: E1126 08:52:43.591370 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad7344d8-55c3-4025-8cba-b5919e8d42a8" containerName="nova-cell1-conductor-conductor"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.591391 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad7344d8-55c3-4025-8cba-b5919e8d42a8" containerName="nova-cell1-conductor-conductor"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.591601 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad7344d8-55c3-4025-8cba-b5919e8d42a8" containerName="nova-cell1-conductor-conductor"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.592341 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.597386 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.613763 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.669217 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.669410 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2cf7\" (UniqueName: \"kubernetes.io/projected/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-kube-api-access-z2cf7\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.669456 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.770668 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.771061 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2cf7\" (UniqueName: \"kubernetes.io/projected/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-kube-api-access-z2cf7\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.771100 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.775370 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.775980 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.790336 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2cf7\" (UniqueName: \"kubernetes.io/projected/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-kube-api-access-z2cf7\") pod \"nova-cell1-conductor-0\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:43 crc kubenswrapper[4940]: I1126 08:52:43.924079 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:44 crc kubenswrapper[4940]: I1126 08:52:44.372726 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 08:52:44 crc kubenswrapper[4940]: I1126 08:52:44.541637 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa","Type":"ContainerStarted","Data":"af2753fe05110e0f319a53efbf8ec6e8ff19433ac569674601633541f3ffe9eb"}
Nov 26 08:52:44 crc kubenswrapper[4940]: I1126 08:52:44.543712 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"16d21285-da91-4a46-8443-7cbdfe13cd1c","Type":"ContainerStarted","Data":"7839800394ff273955e113f230a0c7c3891774792b193261806e9ef7465de682"}
Nov 26 08:52:44 crc kubenswrapper[4940]: I1126 08:52:44.543803 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 26 08:52:44 crc kubenswrapper[4940]: I1126 08:52:44.550030 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cf79c6-c1be-4547-b294-f2f7361aa574","Type":"ContainerStarted","Data":"61e408d219bb80c5fcd96efc5eb0ad4b7a9f4e6a5c5c4677e5d950b70dcf7198"}
Nov 26 08:52:44 crc kubenswrapper[4940]: I1126 08:52:44.568455 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.5684377510000003 podStartE2EDuration="2.568437751s" podCreationTimestamp="2025-11-26 08:52:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:52:44.565439185 +0000 UTC m=+7066.085580854" watchObservedRunningTime="2025-11-26 08:52:44.568437751 +0000 UTC m=+7066.088579380"
Nov 26 08:52:44 crc kubenswrapper[4940]: I1126 08:52:44.592096 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.592076553 podStartE2EDuration="2.592076553s" podCreationTimestamp="2025-11-26 08:52:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:52:44.589278904 +0000 UTC m=+7066.109420533" watchObservedRunningTime="2025-11-26 08:52:44.592076553 +0000 UTC m=+7066.112218172"
Nov 26 08:52:44 crc kubenswrapper[4940]: I1126 08:52:44.731437 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 08:52:45 crc kubenswrapper[4940]: I1126 08:52:45.187333 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad7344d8-55c3-4025-8cba-b5919e8d42a8" path="/var/lib/kubelet/pods/ad7344d8-55c3-4025-8cba-b5919e8d42a8/volumes"
Nov 26 08:52:45 crc kubenswrapper[4940]: I1126 08:52:45.597688 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa","Type":"ContainerStarted","Data":"b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08"}
Nov 26 08:52:45 crc kubenswrapper[4940]: I1126 08:52:45.598327 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:45 crc kubenswrapper[4940]: I1126 08:52:45.622904 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.6228711860000002 podStartE2EDuration="2.622871186s" podCreationTimestamp="2025-11-26 08:52:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:52:45.609922374 +0000 UTC m=+7067.130064003" watchObservedRunningTime="2025-11-26 08:52:45.622871186 +0000 UTC m=+7067.143012795"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.170214 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.340954 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-config-data\") pod \"849696c1-5809-4976-a4cc-b05f4432d07b\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") "
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.341015 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t66g5\" (UniqueName: \"kubernetes.io/projected/849696c1-5809-4976-a4cc-b05f4432d07b-kube-api-access-t66g5\") pod \"849696c1-5809-4976-a4cc-b05f4432d07b\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") "
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.341080 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-combined-ca-bundle\") pod \"849696c1-5809-4976-a4cc-b05f4432d07b\" (UID: \"849696c1-5809-4976-a4cc-b05f4432d07b\") "
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.360453 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/849696c1-5809-4976-a4cc-b05f4432d07b-kube-api-access-t66g5" (OuterVolumeSpecName: "kube-api-access-t66g5") pod "849696c1-5809-4976-a4cc-b05f4432d07b" (UID: "849696c1-5809-4976-a4cc-b05f4432d07b"). InnerVolumeSpecName "kube-api-access-t66g5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.370077 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "849696c1-5809-4976-a4cc-b05f4432d07b" (UID: "849696c1-5809-4976-a4cc-b05f4432d07b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.373700 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-config-data" (OuterVolumeSpecName: "config-data") pod "849696c1-5809-4976-a4cc-b05f4432d07b" (UID: "849696c1-5809-4976-a4cc-b05f4432d07b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.443740 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t66g5\" (UniqueName: \"kubernetes.io/projected/849696c1-5809-4976-a4cc-b05f4432d07b-kube-api-access-t66g5\") on node \"crc\" DevicePath \"\""
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.443810 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.443823 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/849696c1-5809-4976-a4cc-b05f4432d07b-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.577590 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.578078 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.613947 4940 generic.go:334] "Generic (PLEG): container finished" podID="849696c1-5809-4976-a4cc-b05f4432d07b" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c" exitCode=0
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.614140 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.614723 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"849696c1-5809-4976-a4cc-b05f4432d07b","Type":"ContainerDied","Data":"2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c"}
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.614761 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"849696c1-5809-4976-a4cc-b05f4432d07b","Type":"ContainerDied","Data":"eba11557834f168f812046e7dc8c76e1865dd110fccb5120406bd55251852f26"}
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.614778 4940 scope.go:117] "RemoveContainer" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.662483 4940 scope.go:117] "RemoveContainer" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c"
Nov 26 08:52:47 crc kubenswrapper[4940]: E1126 08:52:47.663058 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c\": container with ID starting with 2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c not found: ID does not exist" containerID="2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.663122 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c"} err="failed to get container status \"2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c\": rpc error: code = NotFound desc = could not find container \"2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c\": container with ID starting with 2242f511fa4a1d019232714dd9871897b5d3c8b1515babae5380884fe1745c6c not found: ID does not exist"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.692360 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.705679 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.714597 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 08:52:47 crc kubenswrapper[4940]: E1126 08:52:47.715278 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="849696c1-5809-4976-a4cc-b05f4432d07b" containerName="nova-scheduler-scheduler"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.715368 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="849696c1-5809-4976-a4cc-b05f4432d07b" containerName="nova-scheduler-scheduler"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.715715 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="849696c1-5809-4976-a4cc-b05f4432d07b" containerName="nova-scheduler-scheduler"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.719469 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.725176 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.727672 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.850847 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-config-data\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.851101 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xzqg\" (UniqueName: \"kubernetes.io/projected/f0b14554-e939-4a03-95a3-10517457591a-kube-api-access-5xzqg\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.851318 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.953012 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xzqg\" (UniqueName: \"kubernetes.io/projected/f0b14554-e939-4a03-95a3-10517457591a-kube-api-access-5xzqg\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.953158 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.953307 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-config-data\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.957506 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-config-data\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.961557 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:47 crc kubenswrapper[4940]: I1126 08:52:47.974517 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xzqg\" (UniqueName: \"kubernetes.io/projected/f0b14554-e939-4a03-95a3-10517457591a-kube-api-access-5xzqg\") pod \"nova-scheduler-0\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") " pod="openstack/nova-scheduler-0"
Nov 26 08:52:48 crc kubenswrapper[4940]: I1126 08:52:48.040034 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 08:52:48 crc kubenswrapper[4940]: I1126 08:52:48.512257 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 08:52:48 crc kubenswrapper[4940]: W1126 08:52:48.517200 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0b14554_e939_4a03_95a3_10517457591a.slice/crio-9f021f15c6cab9c8930a67047d69ead1f35edc0a4658c811ffb8d806604e35bd WatchSource:0}: Error finding container 9f021f15c6cab9c8930a67047d69ead1f35edc0a4658c811ffb8d806604e35bd: Status 404 returned error can't find the container with id 9f021f15c6cab9c8930a67047d69ead1f35edc0a4658c811ffb8d806604e35bd
Nov 26 08:52:48 crc kubenswrapper[4940]: I1126 08:52:48.629683 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f0b14554-e939-4a03-95a3-10517457591a","Type":"ContainerStarted","Data":"9f021f15c6cab9c8930a67047d69ead1f35edc0a4658c811ffb8d806604e35bd"}
Nov 26 08:52:49 crc kubenswrapper[4940]: I1126 08:52:49.187833 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="849696c1-5809-4976-a4cc-b05f4432d07b" path="/var/lib/kubelet/pods/849696c1-5809-4976-a4cc-b05f4432d07b/volumes"
Nov 26 08:52:49 crc kubenswrapper[4940]: I1126 08:52:49.649662 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f0b14554-e939-4a03-95a3-10517457591a","Type":"ContainerStarted","Data":"2ca50f6cda18fa9be1e2d835aaa30879b39f9dc7cbe5f1764500dd3ef907357f"}
Nov 26 08:52:49 crc kubenswrapper[4940]: I1126 08:52:49.695335 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.695295554 podStartE2EDuration="2.695295554s" podCreationTimestamp="2025-11-26 08:52:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:52:49.683319253 +0000 UTC m=+7071.203460912" watchObservedRunningTime="2025-11-26 08:52:49.695295554 +0000 UTC m=+7071.215437233"
Nov 26 08:52:49 crc kubenswrapper[4940]: I1126 08:52:49.730894 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 08:52:49 crc kubenswrapper[4940]: I1126 08:52:49.743574 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 08:52:50 crc kubenswrapper[4940]: I1126 08:52:50.685615 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 08:52:52 crc kubenswrapper[4940]: I1126 08:52:52.141472 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 26 08:52:52 crc kubenswrapper[4940]: I1126 08:52:52.141867 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 26 08:52:52 crc kubenswrapper[4940]: I1126 08:52:52.577735 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 26 08:52:52 crc kubenswrapper[4940]: I1126 08:52:52.577820 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 26 08:52:52 crc kubenswrapper[4940]: I1126 08:52:52.957675 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 26 08:52:53 crc kubenswrapper[4940]: I1126 08:52:53.041589 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 26 08:52:53 crc kubenswrapper[4940]: I1126 08:52:53.224287 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.123:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 08:52:53 crc kubenswrapper[4940]: I1126 08:52:53.224312 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.123:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 08:52:53 crc kubenswrapper[4940]: I1126 08:52:53.659198 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.124:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 08:52:53 crc kubenswrapper[4940]: I1126 08:52:53.659193 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.124:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 08:52:53 crc kubenswrapper[4940]: I1126 08:52:53.959861 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 26 08:52:58 crc kubenswrapper[4940]: I1126 08:52:58.040760 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 26 08:52:58 crc kubenswrapper[4940]: I1126 08:52:58.069855 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 26 08:52:58 crc kubenswrapper[4940]: I1126 08:52:58.805722 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.577097 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.581251 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.584744 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.622185 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.656541 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/86786994-9e11-4fa2-8892-7390a41315a0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.656644 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4nqw\" (UniqueName: \"kubernetes.io/projected/86786994-9e11-4fa2-8892-7390a41315a0-kube-api-access-h4nqw\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.656665 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.656680 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.656702 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.656729 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-scripts\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.760965 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/86786994-9e11-4fa2-8892-7390a41315a0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.761089 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4nqw\" (UniqueName: \"kubernetes.io/projected/86786994-9e11-4fa2-8892-7390a41315a0-kube-api-access-h4nqw\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.761109 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.761125 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.761149 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.761178 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-scripts\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.766880 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.766974 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/86786994-9e11-4fa2-8892-7390a41315a0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.767169 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-scripts\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.781104 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.781580 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.829084 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4nqw\" (UniqueName: \"kubernetes.io/projected/86786994-9e11-4fa2-8892-7390a41315a0-kube-api-access-h4nqw\") pod \"cinder-scheduler-0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " pod="openstack/cinder-scheduler-0"
Nov 26 08:53:00 crc kubenswrapper[4940]: I1126 08:53:00.923417 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 26 08:53:01 crc kubenswrapper[4940]: I1126 08:53:01.375228 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 08:53:01 crc kubenswrapper[4940]: W1126 08:53:01.379659 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86786994_9e11_4fa2_8892_7390a41315a0.slice/crio-7bde00443e52f99bfe7e050c11cccc1c8020121dd5a73fc0180fd2193756f5d2 WatchSource:0}: Error finding container 7bde00443e52f99bfe7e050c11cccc1c8020121dd5a73fc0180fd2193756f5d2: Status 404 returned error can't find the container with id 7bde00443e52f99bfe7e050c11cccc1c8020121dd5a73fc0180fd2193756f5d2
Nov 26 08:53:01 crc kubenswrapper[4940]: I1126 08:53:01.785184 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"86786994-9e11-4fa2-8892-7390a41315a0","Type":"ContainerStarted","Data":"7bde00443e52f99bfe7e050c11cccc1c8020121dd5a73fc0180fd2193756f5d2"}
Nov 26 08:53:01 crc kubenswrapper[4940]: I1126 08:53:01.966077 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 26 08:53:01 crc kubenswrapper[4940]: I1126 08:53:01.966316 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerName="cinder-api-log" containerID="cri-o://54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d" gracePeriod=30
Nov 26 08:53:01 crc kubenswrapper[4940]: I1126 08:53:01.966369 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerName="cinder-api" containerID="cri-o://639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3" gracePeriod=30
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.145048 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.145644 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.145900 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.148722 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.581281 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.581734 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.595338 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.595627 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.639126 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"]
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.641190 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.647833 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.681469 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"]
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698665 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698726 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698746 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698768 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698784 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698803 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-dev\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698828 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698855 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-run\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698869 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8942266b-29df-4f10-a2fb-b9c1a6921107-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698889 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698905 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxdvb\" (UniqueName: \"kubernetes.io/projected/8942266b-29df-4f10-a2fb-b9c1a6921107-kube-api-access-qxdvb\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698923 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698945 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698965 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.698989 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.699002 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-sys\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.799982 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801138 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801184 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801203 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-sys\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801244 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801294 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801315 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801341 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801360 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801381 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-dev\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801428 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801472 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-run\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801492 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8942266b-29df-4f10-a2fb-b9c1a6921107-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801515 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801530 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxdvb\" (UniqueName: \"kubernetes.io/projected/8942266b-29df-4f10-a2fb-b9c1a6921107-kube-api-access-qxdvb\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801555 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801770 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-run\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801803 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.801831 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.802257 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.803764 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-dev\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.803839 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.803879 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-sys\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.803930 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.804068 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.804740 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.805101 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8942266b-29df-4f10-a2fb-b9c1a6921107-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.806396 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.806404 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.806790 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8942266b-29df-4f10-a2fb-b9c1a6921107-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.807806 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8942266b-29df-4f10-a2fb-b9c1a6921107-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.811922 4940 generic.go:334] "Generic (PLEG): container finished" podID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerID="54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d" exitCode=143
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.812008 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dd36bbc0-38d1-4086-9493-e20bc64a60d0","Type":"ContainerDied","Data":"54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d"}
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.816444 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"86786994-9e11-4fa2-8892-7390a41315a0","Type":"ContainerStarted","Data":"5ad81fbdc6d73b1c46414234eca2b579c088df48bf4a2b044a44949b18f7e31e"}
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.816476 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.821239 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxdvb\" (UniqueName: \"kubernetes.io/projected/8942266b-29df-4f10-a2fb-b9c1a6921107-kube-api-access-qxdvb\") pod \"cinder-volume-volume1-0\" (UID: \"8942266b-29df-4f10-a2fb-b9c1a6921107\") " pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.828557 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.853451 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.588817927 podStartE2EDuration="2.85342512s" podCreationTimestamp="2025-11-26 08:53:00 +0000 UTC" firstStartedPulling="2025-11-26 08:53:01.382098781 +0000 UTC m=+7082.902240400" lastFinishedPulling="2025-11-26 08:53:01.646705974 +0000 UTC m=+7083.166847593" observedRunningTime="2025-11-26 08:53:02.850103855 +0000 UTC m=+7084.370245484" watchObservedRunningTime="2025-11-26 08:53:02.85342512 +0000 UTC m=+7084.373566739"
Nov 26 08:53:02 crc kubenswrapper[4940]: I1126 08:53:02.975452 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.269883 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"]
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.272149 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.274167 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.281727 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"]
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.423894 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-lib-modules\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424276 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424303 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-config-data\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424355 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-run\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424385 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424417 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ctcc\" (UniqueName: \"kubernetes.io/projected/b4b23dcd-1a65-4784-a247-2475cc261618-kube-api-access-7ctcc\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424461 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424498 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-nvme\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424533 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-dev\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424562 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424598 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-scripts\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424621 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b4b23dcd-1a65-4784-a247-2475cc261618-ceph\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424639 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-config-data-custom\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424675 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424823 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-sys\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.424916 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.526763 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-run\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0"
Nov 26 08:53:03 crc kubenswrapper[4940]: I1126
08:53:03.526825 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.526857 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ctcc\" (UniqueName: \"kubernetes.io/projected/b4b23dcd-1a65-4784-a247-2475cc261618-kube-api-access-7ctcc\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.526902 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.526940 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-nvme\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.526978 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-dev\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527006 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527007 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527071 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-scripts\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527099 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b4b23dcd-1a65-4784-a247-2475cc261618-ceph\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527119 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-config-data-custom\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc 
kubenswrapper[4940]: I1126 08:53:03.527120 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-nvme\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.526898 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-run\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527155 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527145 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527212 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-sys\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527185 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-sys\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527352 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527428 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527453 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-lib-modules\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527520 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 
08:53:03.527554 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-lib-modules\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527565 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-config-data\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527624 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527519 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-dev\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.527855 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/b4b23dcd-1a65-4784-a247-2475cc261618-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.533183 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/b4b23dcd-1a65-4784-a247-2475cc261618-ceph\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.533273 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.533986 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-scripts\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.534556 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-config-data\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.535187 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4b23dcd-1a65-4784-a247-2475cc261618-config-data-custom\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.559265 4940 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-7ctcc\" (UniqueName: \"kubernetes.io/projected/b4b23dcd-1a65-4784-a247-2475cc261618-kube-api-access-7ctcc\") pod \"cinder-backup-0\" (UID: \"b4b23dcd-1a65-4784-a247-2475cc261618\") " pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.579027 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 26 08:53:03 crc kubenswrapper[4940]: W1126 08:53:03.579411 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8942266b_29df_4f10_a2fb_b9c1a6921107.slice/crio-8a43ce2ade4a62bfcb15a24cf7d3c2590a7135b6578190898af35b2cf27fd890 WatchSource:0}: Error finding container 8a43ce2ade4a62bfcb15a24cf7d3c2590a7135b6578190898af35b2cf27fd890: Status 404 returned error can't find the container with id 8a43ce2ade4a62bfcb15a24cf7d3c2590a7135b6578190898af35b2cf27fd890 Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.602158 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.837881 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"86786994-9e11-4fa2-8892-7390a41315a0","Type":"ContainerStarted","Data":"553980f9e32b5907397d2eead56d683b5eaaeeeac4ad04f2edecd79bc55d5199"} Nov 26 08:53:03 crc kubenswrapper[4940]: I1126 08:53:03.840189 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"8942266b-29df-4f10-a2fb-b9c1a6921107","Type":"ContainerStarted","Data":"8a43ce2ade4a62bfcb15a24cf7d3c2590a7135b6578190898af35b2cf27fd890"} Nov 26 08:53:04 crc kubenswrapper[4940]: I1126 08:53:04.102006 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 26 08:53:04 crc kubenswrapper[4940]: W1126 08:53:04.104321 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4b23dcd_1a65_4784_a247_2475cc261618.slice/crio-85852299e884c92538441af29c61e6fc956ce4215356ac4c102433c461e14bfe WatchSource:0}: Error finding container 85852299e884c92538441af29c61e6fc956ce4215356ac4c102433c461e14bfe: Status 404 returned error can't find the container with id 85852299e884c92538441af29c61e6fc956ce4215356ac4c102433c461e14bfe Nov 26 08:53:04 crc kubenswrapper[4940]: I1126 08:53:04.850238 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"b4b23dcd-1a65-4784-a247-2475cc261618","Type":"ContainerStarted","Data":"13d1bf352efd2ac03ca5684c1c7da2d28c94f8e954e1e6b8ceea1b4dd6ab9c63"} Nov 26 08:53:04 crc kubenswrapper[4940]: I1126 08:53:04.851662 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"b4b23dcd-1a65-4784-a247-2475cc261618","Type":"ContainerStarted","Data":"85852299e884c92538441af29c61e6fc956ce4215356ac4c102433c461e14bfe"} Nov 26 08:53:04 crc kubenswrapper[4940]: I1126 08:53:04.854055 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"8942266b-29df-4f10-a2fb-b9c1a6921107","Type":"ContainerStarted","Data":"f68a407a46c409c556664184bc1cbf8ef025ac2254fb873b1e4d0e9e2011c7c4"} Nov 26 08:53:04 crc kubenswrapper[4940]: I1126 08:53:04.854095 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" 
event={"ID":"8942266b-29df-4f10-a2fb-b9c1a6921107","Type":"ContainerStarted","Data":"036163c1d4071a5d57238a2dc07d2efb2c2a853ec51922f2cb10686b322d6ef8"} Nov 26 08:53:04 crc kubenswrapper[4940]: I1126 08:53:04.883727 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=2.478144668 podStartE2EDuration="2.883689471s" podCreationTimestamp="2025-11-26 08:53:02 +0000 UTC" firstStartedPulling="2025-11-26 08:53:03.581934343 +0000 UTC m=+7085.102075962" lastFinishedPulling="2025-11-26 08:53:03.987479146 +0000 UTC m=+7085.507620765" observedRunningTime="2025-11-26 08:53:04.877589567 +0000 UTC m=+7086.397731206" watchObservedRunningTime="2025-11-26 08:53:04.883689471 +0000 UTC m=+7086.403831100" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.495740 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.666907 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-scripts\") pod \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.666960 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd36bbc0-38d1-4086-9493-e20bc64a60d0-etc-machine-id\") pod \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.666997 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data\") pod \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.667034 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data-custom\") pod \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.667076 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-combined-ca-bundle\") pod \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.667116 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd36bbc0-38d1-4086-9493-e20bc64a60d0-logs\") pod \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.667116 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dd36bbc0-38d1-4086-9493-e20bc64a60d0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "dd36bbc0-38d1-4086-9493-e20bc64a60d0" (UID: "dd36bbc0-38d1-4086-9493-e20bc64a60d0"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.667146 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7t7b\" (UniqueName: \"kubernetes.io/projected/dd36bbc0-38d1-4086-9493-e20bc64a60d0-kube-api-access-v7t7b\") pod \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\" (UID: \"dd36bbc0-38d1-4086-9493-e20bc64a60d0\") " Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.667523 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd36bbc0-38d1-4086-9493-e20bc64a60d0-logs" (OuterVolumeSpecName: "logs") pod "dd36bbc0-38d1-4086-9493-e20bc64a60d0" (UID: "dd36bbc0-38d1-4086-9493-e20bc64a60d0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.668166 4940 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/dd36bbc0-38d1-4086-9493-e20bc64a60d0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.668189 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd36bbc0-38d1-4086-9493-e20bc64a60d0-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.672701 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "dd36bbc0-38d1-4086-9493-e20bc64a60d0" (UID: "dd36bbc0-38d1-4086-9493-e20bc64a60d0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.673105 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-scripts" (OuterVolumeSpecName: "scripts") pod "dd36bbc0-38d1-4086-9493-e20bc64a60d0" (UID: "dd36bbc0-38d1-4086-9493-e20bc64a60d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.679860 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd36bbc0-38d1-4086-9493-e20bc64a60d0-kube-api-access-v7t7b" (OuterVolumeSpecName: "kube-api-access-v7t7b") pod "dd36bbc0-38d1-4086-9493-e20bc64a60d0" (UID: "dd36bbc0-38d1-4086-9493-e20bc64a60d0"). InnerVolumeSpecName "kube-api-access-v7t7b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.707427 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd36bbc0-38d1-4086-9493-e20bc64a60d0" (UID: "dd36bbc0-38d1-4086-9493-e20bc64a60d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.732841 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data" (OuterVolumeSpecName: "config-data") pod "dd36bbc0-38d1-4086-9493-e20bc64a60d0" (UID: "dd36bbc0-38d1-4086-9493-e20bc64a60d0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.770256 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.770296 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.770308 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.770319 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd36bbc0-38d1-4086-9493-e20bc64a60d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.770331 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7t7b\" (UniqueName: \"kubernetes.io/projected/dd36bbc0-38d1-4086-9493-e20bc64a60d0-kube-api-access-v7t7b\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.864693 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"b4b23dcd-1a65-4784-a247-2475cc261618","Type":"ContainerStarted","Data":"fd0b3a1f63fab608629af448c5a5db5cded435504f63fab0bbf5c3c0d44d263e"} Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.866461 4940 generic.go:334] "Generic (PLEG): container finished" podID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerID="639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3" exitCode=0 Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.866998 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dd36bbc0-38d1-4086-9493-e20bc64a60d0","Type":"ContainerDied","Data":"639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3"} Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.867066 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"dd36bbc0-38d1-4086-9493-e20bc64a60d0","Type":"ContainerDied","Data":"673707b5d9df8bb18c85876ebd36f525c45b55ff9396b23661f5547d4758c1e7"} Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.867078 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.867083 4940 scope.go:117] "RemoveContainer" containerID="639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.917422 4940 scope.go:117] "RemoveContainer" containerID="54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.924119 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.582952173 podStartE2EDuration="2.924098659s" podCreationTimestamp="2025-11-26 08:53:03 +0000 UTC" firstStartedPulling="2025-11-26 08:53:04.108210685 +0000 UTC m=+7085.628352304" lastFinishedPulling="2025-11-26 08:53:04.449357171 +0000 UTC m=+7085.969498790" observedRunningTime="2025-11-26 08:53:05.888157386 +0000 UTC m=+7087.408299015" watchObservedRunningTime="2025-11-26 08:53:05.924098659 +0000 UTC m=+7087.444240278" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.924380 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.942811 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.958255 4940 scope.go:117] "RemoveContainer" containerID="639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.975518 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 08:53:05 crc kubenswrapper[4940]: E1126 08:53:05.985250 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3\": container with ID starting with 639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3 not found: ID does not exist" containerID="639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.985305 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3"} err="failed to get container status \"639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3\": rpc error: code = NotFound desc = could not find container \"639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3\": container with ID starting with 639d71224333387addddd524052a6602d95eac26050ff1b54a9872a2a736f9c3 not found: ID does not exist" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.985334 4940 scope.go:117] "RemoveContainer" containerID="54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d" Nov 26 08:53:05 crc kubenswrapper[4940]: E1126 08:53:05.987523 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d\": container with ID starting with 54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d not found: ID does not exist" containerID="54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.987564 4940 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d"} err="failed to get container status \"54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d\": rpc error: code = NotFound desc = could not find container \"54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d\": container with ID starting with 54e90343fd9b988c65e41872437205697ce6a463cc32a17636b88fe7e01a114d not found: ID does not exist" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.993205 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 08:53:05 crc kubenswrapper[4940]: E1126 08:53:05.994124 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerName="cinder-api-log" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.994154 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerName="cinder-api-log" Nov 26 08:53:05 crc kubenswrapper[4940]: E1126 08:53:05.994170 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerName="cinder-api" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.994176 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerName="cinder-api" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.994410 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerName="cinder-api" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.994448 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" containerName="cinder-api-log" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.995625 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 08:53:05 crc kubenswrapper[4940]: I1126 08:53:05.998435 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.002867 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.177911 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.177963 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-scripts\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.178072 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80118a04-9a91-4d85-817a-3cd24b169e18-etc-machine-id\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.178114 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-config-data-custom\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.178158 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zbhb\" (UniqueName: \"kubernetes.io/projected/80118a04-9a91-4d85-817a-3cd24b169e18-kube-api-access-6zbhb\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.178203 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80118a04-9a91-4d85-817a-3cd24b169e18-logs\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.178460 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-config-data\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.281260 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80118a04-9a91-4d85-817a-3cd24b169e18-etc-machine-id\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.281344 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-config-data-custom\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.281387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zbhb\" (UniqueName: \"kubernetes.io/projected/80118a04-9a91-4d85-817a-3cd24b169e18-kube-api-access-6zbhb\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.281409 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80118a04-9a91-4d85-817a-3cd24b169e18-logs\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.281457 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-config-data\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.281488 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.281526 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-scripts\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.281862 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/80118a04-9a91-4d85-817a-3cd24b169e18-etc-machine-id\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.282186 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80118a04-9a91-4d85-817a-3cd24b169e18-logs\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.288333 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-config-data-custom\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.288936 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-config-data\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.289454 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.290256 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80118a04-9a91-4d85-817a-3cd24b169e18-scripts\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.303803 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zbhb\" (UniqueName: \"kubernetes.io/projected/80118a04-9a91-4d85-817a-3cd24b169e18-kube-api-access-6zbhb\") pod \"cinder-api-0\" (UID: \"80118a04-9a91-4d85-817a-3cd24b169e18\") " pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.334996 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.786639 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 08:53:06 crc kubenswrapper[4940]: W1126 08:53:06.795106 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80118a04_9a91_4d85_817a_3cd24b169e18.slice/crio-5a4211bb6a283a198dc94e6f073ef08a0c36c9dabc5002ae846aede96d2adddb WatchSource:0}: Error finding container 5a4211bb6a283a198dc94e6f073ef08a0c36c9dabc5002ae846aede96d2adddb: Status 404 returned error can't find the container with id 5a4211bb6a283a198dc94e6f073ef08a0c36c9dabc5002ae846aede96d2adddb Nov 26 08:53:06 crc kubenswrapper[4940]: I1126 08:53:06.878128 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"80118a04-9a91-4d85-817a-3cd24b169e18","Type":"ContainerStarted","Data":"5a4211bb6a283a198dc94e6f073ef08a0c36c9dabc5002ae846aede96d2adddb"} Nov 26 08:53:07 crc kubenswrapper[4940]: I1126 08:53:07.178229 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd36bbc0-38d1-4086-9493-e20bc64a60d0" path="/var/lib/kubelet/pods/dd36bbc0-38d1-4086-9493-e20bc64a60d0/volumes" Nov 26 08:53:07 crc kubenswrapper[4940]: I1126 08:53:07.888996 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"80118a04-9a91-4d85-817a-3cd24b169e18","Type":"ContainerStarted","Data":"07752c5c3d1f6e274ac602d598d86ca6b96b4202dd3e531c500d8a8b894914b0"} Nov 26 08:53:07 crc kubenswrapper[4940]: I1126 08:53:07.976448 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 26 08:53:08 crc kubenswrapper[4940]: I1126 08:53:08.603015 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 26 08:53:08 crc kubenswrapper[4940]: I1126 08:53:08.902340 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"80118a04-9a91-4d85-817a-3cd24b169e18","Type":"ContainerStarted","Data":"021fabfbf39936f60a8693e4bdc79793b0dd8a51faa72b6d8da45184ac1c2601"} Nov 26 08:53:08 crc kubenswrapper[4940]: I1126 08:53:08.902594 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 08:53:08 crc kubenswrapper[4940]: I1126 08:53:08.945200 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/cinder-api-0" podStartSLOduration=3.945170741 podStartE2EDuration="3.945170741s" podCreationTimestamp="2025-11-26 08:53:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:53:08.927702225 +0000 UTC m=+7090.447843864" watchObservedRunningTime="2025-11-26 08:53:08.945170741 +0000 UTC m=+7090.465312410" Nov 26 08:53:11 crc kubenswrapper[4940]: I1126 08:53:11.150830 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 08:53:11 crc kubenswrapper[4940]: I1126 08:53:11.232666 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 08:53:11 crc kubenswrapper[4940]: I1126 08:53:11.942619 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="86786994-9e11-4fa2-8892-7390a41315a0" containerName="cinder-scheduler" containerID="cri-o://5ad81fbdc6d73b1c46414234eca2b579c088df48bf4a2b044a44949b18f7e31e" gracePeriod=30 Nov 26 08:53:11 crc kubenswrapper[4940]: I1126 08:53:11.942770 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="86786994-9e11-4fa2-8892-7390a41315a0" containerName="probe" containerID="cri-o://553980f9e32b5907397d2eead56d683b5eaaeeeac4ad04f2edecd79bc55d5199" gracePeriod=30 Nov 26 08:53:12 crc kubenswrapper[4940]: I1126 08:53:12.972448 4940 generic.go:334] "Generic (PLEG): container finished" podID="86786994-9e11-4fa2-8892-7390a41315a0" containerID="553980f9e32b5907397d2eead56d683b5eaaeeeac4ad04f2edecd79bc55d5199" exitCode=0 Nov 26 08:53:12 crc kubenswrapper[4940]: I1126 08:53:12.972818 4940 generic.go:334] "Generic (PLEG): container finished" podID="86786994-9e11-4fa2-8892-7390a41315a0" containerID="5ad81fbdc6d73b1c46414234eca2b579c088df48bf4a2b044a44949b18f7e31e" exitCode=0 Nov 26 08:53:12 crc kubenswrapper[4940]: I1126 08:53:12.972861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"86786994-9e11-4fa2-8892-7390a41315a0","Type":"ContainerDied","Data":"553980f9e32b5907397d2eead56d683b5eaaeeeac4ad04f2edecd79bc55d5199"} Nov 26 08:53:12 crc kubenswrapper[4940]: I1126 08:53:12.972892 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"86786994-9e11-4fa2-8892-7390a41315a0","Type":"ContainerDied","Data":"5ad81fbdc6d73b1c46414234eca2b579c088df48bf4a2b044a44949b18f7e31e"} Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.175924 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.209567 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.225825 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-combined-ca-bundle\") pod \"86786994-9e11-4fa2-8892-7390a41315a0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.225880 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data\") pod \"86786994-9e11-4fa2-8892-7390a41315a0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.225954 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data-custom\") pod \"86786994-9e11-4fa2-8892-7390a41315a0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.225995 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-scripts\") pod \"86786994-9e11-4fa2-8892-7390a41315a0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.226124 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4nqw\" (UniqueName: \"kubernetes.io/projected/86786994-9e11-4fa2-8892-7390a41315a0-kube-api-access-h4nqw\") pod \"86786994-9e11-4fa2-8892-7390a41315a0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.226158 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/86786994-9e11-4fa2-8892-7390a41315a0-etc-machine-id\") pod \"86786994-9e11-4fa2-8892-7390a41315a0\" (UID: \"86786994-9e11-4fa2-8892-7390a41315a0\") " Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.229492 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86786994-9e11-4fa2-8892-7390a41315a0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "86786994-9e11-4fa2-8892-7390a41315a0" (UID: "86786994-9e11-4fa2-8892-7390a41315a0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.240117 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-scripts" (OuterVolumeSpecName: "scripts") pod "86786994-9e11-4fa2-8892-7390a41315a0" (UID: "86786994-9e11-4fa2-8892-7390a41315a0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.243625 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86786994-9e11-4fa2-8892-7390a41315a0-kube-api-access-h4nqw" (OuterVolumeSpecName: "kube-api-access-h4nqw") pod "86786994-9e11-4fa2-8892-7390a41315a0" (UID: "86786994-9e11-4fa2-8892-7390a41315a0"). InnerVolumeSpecName "kube-api-access-h4nqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.251339 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "86786994-9e11-4fa2-8892-7390a41315a0" (UID: "86786994-9e11-4fa2-8892-7390a41315a0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.305229 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "86786994-9e11-4fa2-8892-7390a41315a0" (UID: "86786994-9e11-4fa2-8892-7390a41315a0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.329792 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.330112 4940 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.330214 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.330292 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4nqw\" (UniqueName: \"kubernetes.io/projected/86786994-9e11-4fa2-8892-7390a41315a0-kube-api-access-h4nqw\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.330374 4940 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/86786994-9e11-4fa2-8892-7390a41315a0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.348869 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data" (OuterVolumeSpecName: "config-data") pod "86786994-9e11-4fa2-8892-7390a41315a0" (UID: "86786994-9e11-4fa2-8892-7390a41315a0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.436429 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86786994-9e11-4fa2-8892-7390a41315a0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.813317 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.987358 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"86786994-9e11-4fa2-8892-7390a41315a0","Type":"ContainerDied","Data":"7bde00443e52f99bfe7e050c11cccc1c8020121dd5a73fc0180fd2193756f5d2"} Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.987904 4940 scope.go:117] "RemoveContainer" containerID="553980f9e32b5907397d2eead56d683b5eaaeeeac4ad04f2edecd79bc55d5199" Nov 26 08:53:13 crc kubenswrapper[4940]: I1126 08:53:13.987446 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.017089 4940 scope.go:117] "RemoveContainer" containerID="5ad81fbdc6d73b1c46414234eca2b579c088df48bf4a2b044a44949b18f7e31e" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.027306 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.050221 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.059555 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 08:53:14 crc kubenswrapper[4940]: E1126 08:53:14.059962 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86786994-9e11-4fa2-8892-7390a41315a0" containerName="probe" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.059980 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="86786994-9e11-4fa2-8892-7390a41315a0" containerName="probe" Nov 26 08:53:14 crc kubenswrapper[4940]: E1126 08:53:14.060012 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86786994-9e11-4fa2-8892-7390a41315a0" containerName="cinder-scheduler" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.060019 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="86786994-9e11-4fa2-8892-7390a41315a0" containerName="cinder-scheduler" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.060240 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="86786994-9e11-4fa2-8892-7390a41315a0" containerName="probe" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.060256 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="86786994-9e11-4fa2-8892-7390a41315a0" containerName="cinder-scheduler" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.061285 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.064740 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.067166 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.151590 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-config-data\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.151691 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-scripts\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.151744 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.151778 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/64575794-fe95-4733-bb74-66dcc92daec4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.151815 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.151847 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gff2m\" (UniqueName: \"kubernetes.io/projected/64575794-fe95-4733-bb74-66dcc92daec4-kube-api-access-gff2m\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.254195 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gff2m\" (UniqueName: \"kubernetes.io/projected/64575794-fe95-4733-bb74-66dcc92daec4-kube-api-access-gff2m\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.254419 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-config-data\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.254519 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-scripts\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.254578 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.254611 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/64575794-fe95-4733-bb74-66dcc92daec4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.254653 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.255210 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/64575794-fe95-4733-bb74-66dcc92daec4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.262240 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.269334 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-scripts\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.269753 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-config-data\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.274771 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gff2m\" (UniqueName: \"kubernetes.io/projected/64575794-fe95-4733-bb74-66dcc92daec4-kube-api-access-gff2m\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.282116 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64575794-fe95-4733-bb74-66dcc92daec4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"64575794-fe95-4733-bb74-66dcc92daec4\") " pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 
crc kubenswrapper[4940]: I1126 08:53:14.380904 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 08:53:14 crc kubenswrapper[4940]: I1126 08:53:14.878564 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 08:53:15 crc kubenswrapper[4940]: I1126 08:53:15.001010 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"64575794-fe95-4733-bb74-66dcc92daec4","Type":"ContainerStarted","Data":"ab8328c18686727c5c11c88538c3683a9e65a16f2e29b458a9f33f9c39394454"} Nov 26 08:53:15 crc kubenswrapper[4940]: I1126 08:53:15.179440 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86786994-9e11-4fa2-8892-7390a41315a0" path="/var/lib/kubelet/pods/86786994-9e11-4fa2-8892-7390a41315a0/volumes" Nov 26 08:53:16 crc kubenswrapper[4940]: I1126 08:53:16.016799 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"64575794-fe95-4733-bb74-66dcc92daec4","Type":"ContainerStarted","Data":"670f7f54d121aea79ff4c9c0870b5e493ac1965a1d126b35c3624abeb066b7a9"} Nov 26 08:53:17 crc kubenswrapper[4940]: I1126 08:53:17.043973 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"64575794-fe95-4733-bb74-66dcc92daec4","Type":"ContainerStarted","Data":"65c46cd24ad5e4b29dc15c85ac0cffd2190d86b0cf2a8ad31319b15b02b33170"} Nov 26 08:53:17 crc kubenswrapper[4940]: I1126 08:53:17.071299 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.07128005 podStartE2EDuration="3.07128005s" podCreationTimestamp="2025-11-26 08:53:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:53:17.06906604 +0000 UTC m=+7098.589207659" watchObservedRunningTime="2025-11-26 08:53:17.07128005 +0000 UTC m=+7098.591421669" Nov 26 08:53:18 crc kubenswrapper[4940]: I1126 08:53:18.176157 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.318091 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gmzht"] Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.320868 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.329446 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gmzht"] Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.382495 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.463628 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsqvt\" (UniqueName: \"kubernetes.io/projected/e86c3478-5b6f-4d03-ba01-b202bb74e48b-kube-api-access-tsqvt\") pod \"redhat-marketplace-gmzht\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.463682 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-utilities\") pod \"redhat-marketplace-gmzht\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.465238 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-catalog-content\") pod \"redhat-marketplace-gmzht\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.566694 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-catalog-content\") pod \"redhat-marketplace-gmzht\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.566816 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsqvt\" (UniqueName: \"kubernetes.io/projected/e86c3478-5b6f-4d03-ba01-b202bb74e48b-kube-api-access-tsqvt\") pod \"redhat-marketplace-gmzht\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.566844 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-utilities\") pod \"redhat-marketplace-gmzht\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.567373 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-utilities\") pod \"redhat-marketplace-gmzht\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.567652 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-catalog-content\") pod \"redhat-marketplace-gmzht\" (UID: 
\"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.587087 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsqvt\" (UniqueName: \"kubernetes.io/projected/e86c3478-5b6f-4d03-ba01-b202bb74e48b-kube-api-access-tsqvt\") pod \"redhat-marketplace-gmzht\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:19 crc kubenswrapper[4940]: I1126 08:53:19.653773 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:20 crc kubenswrapper[4940]: I1126 08:53:20.157628 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gmzht"] Nov 26 08:53:21 crc kubenswrapper[4940]: I1126 08:53:21.085848 4940 generic.go:334] "Generic (PLEG): container finished" podID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerID="585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557" exitCode=0 Nov 26 08:53:21 crc kubenswrapper[4940]: I1126 08:53:21.085901 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gmzht" event={"ID":"e86c3478-5b6f-4d03-ba01-b202bb74e48b","Type":"ContainerDied","Data":"585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557"} Nov 26 08:53:21 crc kubenswrapper[4940]: I1126 08:53:21.086269 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gmzht" event={"ID":"e86c3478-5b6f-4d03-ba01-b202bb74e48b","Type":"ContainerStarted","Data":"0687d3e5d0673dba10fb5a5d00c7872292445ecf005c7dab12d17f1b8bdb2451"} Nov 26 08:53:21 crc kubenswrapper[4940]: I1126 08:53:21.088680 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:53:21 crc kubenswrapper[4940]: I1126 08:53:21.728661 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:53:21 crc kubenswrapper[4940]: I1126 08:53:21.728735 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:53:23 crc kubenswrapper[4940]: I1126 08:53:23.118448 4940 generic.go:334] "Generic (PLEG): container finished" podID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerID="d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553" exitCode=0 Nov 26 08:53:23 crc kubenswrapper[4940]: I1126 08:53:23.118501 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gmzht" event={"ID":"e86c3478-5b6f-4d03-ba01-b202bb74e48b","Type":"ContainerDied","Data":"d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553"} Nov 26 08:53:24 crc kubenswrapper[4940]: I1126 08:53:24.129624 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gmzht" 
event={"ID":"e86c3478-5b6f-4d03-ba01-b202bb74e48b","Type":"ContainerStarted","Data":"0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff"} Nov 26 08:53:24 crc kubenswrapper[4940]: I1126 08:53:24.148724 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gmzht" podStartSLOduration=2.701095179 podStartE2EDuration="5.148708369s" podCreationTimestamp="2025-11-26 08:53:19 +0000 UTC" firstStartedPulling="2025-11-26 08:53:21.0884109 +0000 UTC m=+7102.608552519" lastFinishedPulling="2025-11-26 08:53:23.53602408 +0000 UTC m=+7105.056165709" observedRunningTime="2025-11-26 08:53:24.14653951 +0000 UTC m=+7105.666681129" watchObservedRunningTime="2025-11-26 08:53:24.148708369 +0000 UTC m=+7105.668849988" Nov 26 08:53:24 crc kubenswrapper[4940]: I1126 08:53:24.598629 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 08:53:29 crc kubenswrapper[4940]: I1126 08:53:29.654907 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:29 crc kubenswrapper[4940]: I1126 08:53:29.655453 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:29 crc kubenswrapper[4940]: I1126 08:53:29.706392 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:30 crc kubenswrapper[4940]: I1126 08:53:30.275181 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:30 crc kubenswrapper[4940]: I1126 08:53:30.337292 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gmzht"] Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.224333 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gmzht" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerName="registry-server" containerID="cri-o://0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff" gracePeriod=2 Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.709511 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.847991 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-catalog-content\") pod \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.848170 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsqvt\" (UniqueName: \"kubernetes.io/projected/e86c3478-5b6f-4d03-ba01-b202bb74e48b-kube-api-access-tsqvt\") pod \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.848509 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-utilities\") pod \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\" (UID: \"e86c3478-5b6f-4d03-ba01-b202bb74e48b\") " Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.850964 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-utilities" (OuterVolumeSpecName: "utilities") pod "e86c3478-5b6f-4d03-ba01-b202bb74e48b" (UID: "e86c3478-5b6f-4d03-ba01-b202bb74e48b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.856753 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e86c3478-5b6f-4d03-ba01-b202bb74e48b-kube-api-access-tsqvt" (OuterVolumeSpecName: "kube-api-access-tsqvt") pod "e86c3478-5b6f-4d03-ba01-b202bb74e48b" (UID: "e86c3478-5b6f-4d03-ba01-b202bb74e48b"). InnerVolumeSpecName "kube-api-access-tsqvt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.889166 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e86c3478-5b6f-4d03-ba01-b202bb74e48b" (UID: "e86c3478-5b6f-4d03-ba01-b202bb74e48b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.951799 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.951843 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e86c3478-5b6f-4d03-ba01-b202bb74e48b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:32 crc kubenswrapper[4940]: I1126 08:53:32.951857 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsqvt\" (UniqueName: \"kubernetes.io/projected/e86c3478-5b6f-4d03-ba01-b202bb74e48b-kube-api-access-tsqvt\") on node \"crc\" DevicePath \"\"" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.236064 4940 generic.go:334] "Generic (PLEG): container finished" podID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerID="0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff" exitCode=0 Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.236074 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gmzht" event={"ID":"e86c3478-5b6f-4d03-ba01-b202bb74e48b","Type":"ContainerDied","Data":"0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff"} Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.236151 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gmzht" event={"ID":"e86c3478-5b6f-4d03-ba01-b202bb74e48b","Type":"ContainerDied","Data":"0687d3e5d0673dba10fb5a5d00c7872292445ecf005c7dab12d17f1b8bdb2451"} Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.236112 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gmzht" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.236182 4940 scope.go:117] "RemoveContainer" containerID="0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.261213 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gmzht"] Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.270835 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gmzht"] Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.271910 4940 scope.go:117] "RemoveContainer" containerID="d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.348985 4940 scope.go:117] "RemoveContainer" containerID="585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.378697 4940 scope.go:117] "RemoveContainer" containerID="0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff" Nov 26 08:53:33 crc kubenswrapper[4940]: E1126 08:53:33.379142 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff\": container with ID starting with 0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff not found: ID does not exist" containerID="0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.379196 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff"} err="failed to get container status \"0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff\": rpc error: code = NotFound desc = could not find container \"0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff\": container with ID starting with 0f92665411897b4fc92274ff1e6e6e253187f09e4616c648efa21287e3ef1cff not found: ID does not exist" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.379290 4940 scope.go:117] "RemoveContainer" containerID="d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553" Nov 26 08:53:33 crc kubenswrapper[4940]: E1126 08:53:33.379617 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553\": container with ID starting with d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553 not found: ID does not exist" containerID="d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.379648 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553"} err="failed to get container status \"d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553\": rpc error: code = NotFound desc = could not find container \"d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553\": container with ID starting with d0fea1b81cd81386693ce0ee11589cae11d0a26e880265951ac1925940feb553 not found: ID does not exist" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.379671 4940 scope.go:117] "RemoveContainer" 
containerID="585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557" Nov 26 08:53:33 crc kubenswrapper[4940]: E1126 08:53:33.379893 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557\": container with ID starting with 585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557 not found: ID does not exist" containerID="585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557" Nov 26 08:53:33 crc kubenswrapper[4940]: I1126 08:53:33.379922 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557"} err="failed to get container status \"585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557\": rpc error: code = NotFound desc = could not find container \"585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557\": container with ID starting with 585df0e1e9a5ac8121695934df39cfb22914342bbb3df93fc74aade76c683557 not found: ID does not exist" Nov 26 08:53:35 crc kubenswrapper[4940]: I1126 08:53:35.177058 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" path="/var/lib/kubelet/pods/e86c3478-5b6f-4d03-ba01-b202bb74e48b/volumes" Nov 26 08:53:51 crc kubenswrapper[4940]: I1126 08:53:51.727857 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:53:51 crc kubenswrapper[4940]: I1126 08:53:51.728391 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:54:04 crc kubenswrapper[4940]: I1126 08:54:04.069634 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-cd18-account-create-update-n4wgd"] Nov 26 08:54:04 crc kubenswrapper[4940]: I1126 08:54:04.080058 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-5zt9x"] Nov 26 08:54:04 crc kubenswrapper[4940]: I1126 08:54:04.087561 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-cd18-account-create-update-n4wgd"] Nov 26 08:54:04 crc kubenswrapper[4940]: I1126 08:54:04.094358 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-5zt9x"] Nov 26 08:54:05 crc kubenswrapper[4940]: I1126 08:54:05.183360 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63e84272-622c-49b0-a1d5-d13bce734c64" path="/var/lib/kubelet/pods/63e84272-622c-49b0-a1d5-d13bce734c64/volumes" Nov 26 08:54:05 crc kubenswrapper[4940]: I1126 08:54:05.183884 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9e91a83-324a-4101-b142-bdb57ed475ac" path="/var/lib/kubelet/pods/f9e91a83-324a-4101-b142-bdb57ed475ac/volumes" Nov 26 08:54:12 crc kubenswrapper[4940]: I1126 08:54:12.302219 4940 scope.go:117] "RemoveContainer" containerID="01ea5669a82f2c9ceb860a26d54c3e24f7d5083c7165f5a711cff997ca16ff6b" Nov 26 08:54:12 crc kubenswrapper[4940]: I1126 08:54:12.336317 4940 
scope.go:117] "RemoveContainer" containerID="591f2f4ef6e508a270b5d8f0b185cdd353717be5a56703cf615159d43dd0b1d4" Nov 26 08:54:12 crc kubenswrapper[4940]: I1126 08:54:12.408463 4940 scope.go:117] "RemoveContainer" containerID="c8992018d04e40e16e5d12c0c843c39118f802f8850f42cb6b9f571e550497f4" Nov 26 08:54:12 crc kubenswrapper[4940]: I1126 08:54:12.458146 4940 scope.go:117] "RemoveContainer" containerID="ab89f4f6e8a00976895dc06bfa33d886f0516f81e08b7e39d88ddcf38e211237" Nov 26 08:54:16 crc kubenswrapper[4940]: I1126 08:54:16.045749 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-5lxt4"] Nov 26 08:54:16 crc kubenswrapper[4940]: I1126 08:54:16.057602 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-5lxt4"] Nov 26 08:54:17 crc kubenswrapper[4940]: I1126 08:54:17.188876 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3" path="/var/lib/kubelet/pods/7da290d9-4fcf-4d30-ba77-6fea9dcd5ce3/volumes" Nov 26 08:54:21 crc kubenswrapper[4940]: I1126 08:54:21.728252 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:54:21 crc kubenswrapper[4940]: I1126 08:54:21.728775 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:54:21 crc kubenswrapper[4940]: I1126 08:54:21.728827 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 08:54:21 crc kubenswrapper[4940]: I1126 08:54:21.729782 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4097e33d00555a418131c81b7ef769c20a467eee44ca0e7c613ac70f0162336d"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:54:21 crc kubenswrapper[4940]: I1126 08:54:21.729841 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://4097e33d00555a418131c81b7ef769c20a467eee44ca0e7c613ac70f0162336d" gracePeriod=600 Nov 26 08:54:22 crc kubenswrapper[4940]: I1126 08:54:22.855617 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="4097e33d00555a418131c81b7ef769c20a467eee44ca0e7c613ac70f0162336d" exitCode=0 Nov 26 08:54:22 crc kubenswrapper[4940]: I1126 08:54:22.855728 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"4097e33d00555a418131c81b7ef769c20a467eee44ca0e7c613ac70f0162336d"} Nov 26 08:54:22 crc kubenswrapper[4940]: I1126 08:54:22.856362 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f"} Nov 26 08:54:22 crc kubenswrapper[4940]: I1126 08:54:22.856404 4940 scope.go:117] "RemoveContainer" containerID="664bdc63bcd76bc0d490393eac0b75fcc0ad7c27c77ea55837922c1d88f945ad" Nov 26 08:54:29 crc kubenswrapper[4940]: I1126 08:54:29.035391 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-t6hpq"] Nov 26 08:54:29 crc kubenswrapper[4940]: I1126 08:54:29.046606 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-t6hpq"] Nov 26 08:54:29 crc kubenswrapper[4940]: I1126 08:54:29.185528 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="924ac90c-c73d-4c72-b964-a25d7dece172" path="/var/lib/kubelet/pods/924ac90c-c73d-4c72-b964-a25d7dece172/volumes" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.800189 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5mr5x"] Nov 26 08:54:40 crc kubenswrapper[4940]: E1126 08:54:40.801102 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerName="registry-server" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.801118 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerName="registry-server" Nov 26 08:54:40 crc kubenswrapper[4940]: E1126 08:54:40.801167 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerName="extract-utilities" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.801175 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerName="extract-utilities" Nov 26 08:54:40 crc kubenswrapper[4940]: E1126 08:54:40.801196 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerName="extract-content" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.801203 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerName="extract-content" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.801411 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e86c3478-5b6f-4d03-ba01-b202bb74e48b" containerName="registry-server" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.803023 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.817663 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mr5x"] Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.933619 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-utilities\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.933689 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntpw8\" (UniqueName: \"kubernetes.io/projected/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-kube-api-access-ntpw8\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:40 crc kubenswrapper[4940]: I1126 08:54:40.933863 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-catalog-content\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:41 crc kubenswrapper[4940]: I1126 08:54:41.035202 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-utilities\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:41 crc kubenswrapper[4940]: I1126 08:54:41.035264 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntpw8\" (UniqueName: \"kubernetes.io/projected/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-kube-api-access-ntpw8\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:41 crc kubenswrapper[4940]: I1126 08:54:41.035309 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-catalog-content\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:41 crc kubenswrapper[4940]: I1126 08:54:41.035775 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-utilities\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:41 crc kubenswrapper[4940]: I1126 08:54:41.036925 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-catalog-content\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:41 crc kubenswrapper[4940]: I1126 08:54:41.055418 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ntpw8\" (UniqueName: \"kubernetes.io/projected/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-kube-api-access-ntpw8\") pod \"certified-operators-5mr5x\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:41 crc kubenswrapper[4940]: I1126 08:54:41.165769 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:41 crc kubenswrapper[4940]: I1126 08:54:41.712859 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mr5x"] Nov 26 08:54:41 crc kubenswrapper[4940]: W1126 08:54:41.713913 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93bb6d0a_cb27_43a3_87e4_e297fab0cd94.slice/crio-4397678993cb48e414037cf789c4e36ac2df20eea655ce9d4bf04b13ca81d87c WatchSource:0}: Error finding container 4397678993cb48e414037cf789c4e36ac2df20eea655ce9d4bf04b13ca81d87c: Status 404 returned error can't find the container with id 4397678993cb48e414037cf789c4e36ac2df20eea655ce9d4bf04b13ca81d87c Nov 26 08:54:42 crc kubenswrapper[4940]: I1126 08:54:42.081426 4940 generic.go:334] "Generic (PLEG): container finished" podID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerID="b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98" exitCode=0 Nov 26 08:54:42 crc kubenswrapper[4940]: I1126 08:54:42.081486 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mr5x" event={"ID":"93bb6d0a-cb27-43a3-87e4-e297fab0cd94","Type":"ContainerDied","Data":"b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98"} Nov 26 08:54:42 crc kubenswrapper[4940]: I1126 08:54:42.081522 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mr5x" event={"ID":"93bb6d0a-cb27-43a3-87e4-e297fab0cd94","Type":"ContainerStarted","Data":"4397678993cb48e414037cf789c4e36ac2df20eea655ce9d4bf04b13ca81d87c"} Nov 26 08:54:43 crc kubenswrapper[4940]: I1126 08:54:43.092727 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mr5x" event={"ID":"93bb6d0a-cb27-43a3-87e4-e297fab0cd94","Type":"ContainerStarted","Data":"8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f"} Nov 26 08:54:44 crc kubenswrapper[4940]: I1126 08:54:44.103420 4940 generic.go:334] "Generic (PLEG): container finished" podID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerID="8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f" exitCode=0 Nov 26 08:54:44 crc kubenswrapper[4940]: I1126 08:54:44.103496 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mr5x" event={"ID":"93bb6d0a-cb27-43a3-87e4-e297fab0cd94","Type":"ContainerDied","Data":"8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f"} Nov 26 08:54:45 crc kubenswrapper[4940]: I1126 08:54:45.118107 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mr5x" event={"ID":"93bb6d0a-cb27-43a3-87e4-e297fab0cd94","Type":"ContainerStarted","Data":"52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369"} Nov 26 08:54:45 crc kubenswrapper[4940]: I1126 08:54:45.151902 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5mr5x" 
podStartSLOduration=2.687566726 podStartE2EDuration="5.151880149s" podCreationTimestamp="2025-11-26 08:54:40 +0000 UTC" firstStartedPulling="2025-11-26 08:54:42.083397799 +0000 UTC m=+7183.603539428" lastFinishedPulling="2025-11-26 08:54:44.547711192 +0000 UTC m=+7186.067852851" observedRunningTime="2025-11-26 08:54:45.144889635 +0000 UTC m=+7186.665031264" watchObservedRunningTime="2025-11-26 08:54:45.151880149 +0000 UTC m=+7186.672021768" Nov 26 08:54:51 crc kubenswrapper[4940]: I1126 08:54:51.177778 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:51 crc kubenswrapper[4940]: I1126 08:54:51.178549 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:51 crc kubenswrapper[4940]: I1126 08:54:51.241549 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:52 crc kubenswrapper[4940]: I1126 08:54:52.250104 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:52 crc kubenswrapper[4940]: I1126 08:54:52.315195 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5mr5x"] Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.233356 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5mr5x" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerName="registry-server" containerID="cri-o://52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369" gracePeriod=2 Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.764085 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.817144 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-utilities\") pod \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.817316 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntpw8\" (UniqueName: \"kubernetes.io/projected/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-kube-api-access-ntpw8\") pod \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.817343 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-catalog-content\") pod \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\" (UID: \"93bb6d0a-cb27-43a3-87e4-e297fab0cd94\") " Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.823153 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-kube-api-access-ntpw8" (OuterVolumeSpecName: "kube-api-access-ntpw8") pod "93bb6d0a-cb27-43a3-87e4-e297fab0cd94" (UID: "93bb6d0a-cb27-43a3-87e4-e297fab0cd94"). InnerVolumeSpecName "kube-api-access-ntpw8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.827972 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-utilities" (OuterVolumeSpecName: "utilities") pod "93bb6d0a-cb27-43a3-87e4-e297fab0cd94" (UID: "93bb6d0a-cb27-43a3-87e4-e297fab0cd94"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.861789 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93bb6d0a-cb27-43a3-87e4-e297fab0cd94" (UID: "93bb6d0a-cb27-43a3-87e4-e297fab0cd94"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.921988 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.922079 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntpw8\" (UniqueName: \"kubernetes.io/projected/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-kube-api-access-ntpw8\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:54 crc kubenswrapper[4940]: I1126 08:54:54.922096 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93bb6d0a-cb27-43a3-87e4-e297fab0cd94-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.247620 4940 generic.go:334] "Generic (PLEG): container finished" podID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerID="52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369" exitCode=0 Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.247693 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mr5x" event={"ID":"93bb6d0a-cb27-43a3-87e4-e297fab0cd94","Type":"ContainerDied","Data":"52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369"} Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.247744 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5mr5x" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.247782 4940 scope.go:117] "RemoveContainer" containerID="52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.247756 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mr5x" event={"ID":"93bb6d0a-cb27-43a3-87e4-e297fab0cd94","Type":"ContainerDied","Data":"4397678993cb48e414037cf789c4e36ac2df20eea655ce9d4bf04b13ca81d87c"} Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.279478 4940 scope.go:117] "RemoveContainer" containerID="8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.283786 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5mr5x"] Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.297321 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5mr5x"] Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.319200 4940 scope.go:117] "RemoveContainer" containerID="b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.351672 4940 scope.go:117] "RemoveContainer" containerID="52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369" Nov 26 08:54:55 crc kubenswrapper[4940]: E1126 08:54:55.352118 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369\": container with ID starting with 52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369 not found: ID does not exist" containerID="52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.352188 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369"} err="failed to get container status \"52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369\": rpc error: code = NotFound desc = could not find container \"52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369\": container with ID starting with 52e5775fa1186ff5da8f26c1ce88bbbefd99d56bd242bbee5f45e7ba069bb369 not found: ID does not exist" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.352236 4940 scope.go:117] "RemoveContainer" containerID="8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f" Nov 26 08:54:55 crc kubenswrapper[4940]: E1126 08:54:55.352682 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f\": container with ID starting with 8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f not found: ID does not exist" containerID="8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.352714 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f"} err="failed to get container status \"8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f\": rpc error: code = NotFound desc = could not find 
container \"8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f\": container with ID starting with 8c449f9af06bd7d77ddc0329c6d740e8c96daaf01a050567d60cc087ea9e894f not found: ID does not exist" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.352732 4940 scope.go:117] "RemoveContainer" containerID="b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98" Nov 26 08:54:55 crc kubenswrapper[4940]: E1126 08:54:55.352997 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98\": container with ID starting with b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98 not found: ID does not exist" containerID="b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98" Nov 26 08:54:55 crc kubenswrapper[4940]: I1126 08:54:55.353031 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98"} err="failed to get container status \"b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98\": rpc error: code = NotFound desc = could not find container \"b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98\": container with ID starting with b48972362e8bb5e4c7a70f82ff34656bc1e25826243b054781e6b55a5c97bd98 not found: ID does not exist" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.541067 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5958ff99bf-nxb6d"] Nov 26 08:54:56 crc kubenswrapper[4940]: E1126 08:54:56.541809 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerName="extract-content" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.541825 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerName="extract-content" Nov 26 08:54:56 crc kubenswrapper[4940]: E1126 08:54:56.541872 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerName="registry-server" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.541881 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerName="registry-server" Nov 26 08:54:56 crc kubenswrapper[4940]: E1126 08:54:56.541902 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerName="extract-utilities" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.541914 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerName="extract-utilities" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.542151 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" containerName="registry-server" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.543534 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.546404 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.546589 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.546727 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.546849 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-q2tg8" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.564620 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5958ff99bf-nxb6d"] Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.590230 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.590507 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerName="glance-log" containerID="cri-o://4d0dac25a3a7420857bb6f1c73f51ac9104242139c009d040c9920f8a02ead4e" gracePeriod=30 Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.590943 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerName="glance-httpd" containerID="cri-o://db8e92b7ef4d74dc42bc30a9e5d43e2e555e54e1c654e038cd2a7bcd1a001db5" gracePeriod=30 Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.667838 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.668107 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerName="glance-log" containerID="cri-o://d881374d257f417e1c4f533832cbdd4450b2c7c56acd2a6f0774f3372b5980cd" gracePeriod=30 Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.668653 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerName="glance-httpd" containerID="cri-o://a64b289b03cd3b7766145b3a34cb3b7a8dbb600e3ea64d6ad45822a6ac77cced" gracePeriod=30 Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.689461 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c035194-5527-4870-8500-e1e64ce38920-logs\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.689529 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2c035194-5527-4870-8500-e1e64ce38920-horizon-secret-key\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.689847 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-scripts\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.689951 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-config-data\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.689997 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np4h2\" (UniqueName: \"kubernetes.io/projected/2c035194-5527-4870-8500-e1e64ce38920-kube-api-access-np4h2\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.690741 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6f79b977b9-wvzvc"] Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.692557 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.702953 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6f79b977b9-wvzvc"] Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791175 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c035194-5527-4870-8500-e1e64ce38920-logs\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791404 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2c035194-5527-4870-8500-e1e64ce38920-horizon-secret-key\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791483 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbxkj\" (UniqueName: \"kubernetes.io/projected/6e4d2d07-fbd3-4557-948d-5719310bf1cd-kube-api-access-qbxkj\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791505 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-scripts\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791540 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-config-data\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 
08:54:56.791558 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6e4d2d07-fbd3-4557-948d-5719310bf1cd-horizon-secret-key\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791577 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e4d2d07-fbd3-4557-948d-5719310bf1cd-logs\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791601 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np4h2\" (UniqueName: \"kubernetes.io/projected/2c035194-5527-4870-8500-e1e64ce38920-kube-api-access-np4h2\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791632 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-scripts\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.791661 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-config-data\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.792054 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c035194-5527-4870-8500-e1e64ce38920-logs\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.792725 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-scripts\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.793246 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-config-data\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.796454 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2c035194-5527-4870-8500-e1e64ce38920-horizon-secret-key\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.806531 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np4h2\" (UniqueName: 
\"kubernetes.io/projected/2c035194-5527-4870-8500-e1e64ce38920-kube-api-access-np4h2\") pod \"horizon-5958ff99bf-nxb6d\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.866163 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.894421 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-config-data\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.894583 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbxkj\" (UniqueName: \"kubernetes.io/projected/6e4d2d07-fbd3-4557-948d-5719310bf1cd-kube-api-access-qbxkj\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.894637 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6e4d2d07-fbd3-4557-948d-5719310bf1cd-horizon-secret-key\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.894663 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e4d2d07-fbd3-4557-948d-5719310bf1cd-logs\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.894712 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-scripts\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.895660 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-scripts\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.896583 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e4d2d07-fbd3-4557-948d-5719310bf1cd-logs\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.896900 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-config-data\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.902535 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/6e4d2d07-fbd3-4557-948d-5719310bf1cd-horizon-secret-key\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:56 crc kubenswrapper[4940]: I1126 08:54:56.911119 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbxkj\" (UniqueName: \"kubernetes.io/projected/6e4d2d07-fbd3-4557-948d-5719310bf1cd-kube-api-access-qbxkj\") pod \"horizon-6f79b977b9-wvzvc\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.061533 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.183176 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93bb6d0a-cb27-43a3-87e4-e297fab0cd94" path="/var/lib/kubelet/pods/93bb6d0a-cb27-43a3-87e4-e297fab0cd94/volumes" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.312168 4940 generic.go:334] "Generic (PLEG): container finished" podID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerID="4d0dac25a3a7420857bb6f1c73f51ac9104242139c009d040c9920f8a02ead4e" exitCode=143 Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.312272 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db","Type":"ContainerDied","Data":"4d0dac25a3a7420857bb6f1c73f51ac9104242139c009d040c9920f8a02ead4e"} Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.315113 4940 generic.go:334] "Generic (PLEG): container finished" podID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerID="d881374d257f417e1c4f533832cbdd4450b2c7c56acd2a6f0774f3372b5980cd" exitCode=143 Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.315158 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d0092e5e-b891-449c-a390-ad9741a14b8f","Type":"ContainerDied","Data":"d881374d257f417e1c4f533832cbdd4450b2c7c56acd2a6f0774f3372b5980cd"} Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.339526 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6f79b977b9-wvzvc"] Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.369640 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5958ff99bf-nxb6d"] Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.393612 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5958ff99bf-nxb6d"] Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.413875 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-98d9954b9-rg54m"] Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.418228 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.451597 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-98d9954b9-rg54m"] Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.509903 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7127829b-b2bf-48fe-866d-397b4628bcd6-horizon-secret-key\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.509965 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7127829b-b2bf-48fe-866d-397b4628bcd6-logs\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.510013 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-scripts\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.510113 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8c7z\" (UniqueName: \"kubernetes.io/projected/7127829b-b2bf-48fe-866d-397b4628bcd6-kube-api-access-s8c7z\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.510531 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-config-data\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.612692 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-scripts\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.613241 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8c7z\" (UniqueName: \"kubernetes.io/projected/7127829b-b2bf-48fe-866d-397b4628bcd6-kube-api-access-s8c7z\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.613319 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-config-data\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.613501 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/7127829b-b2bf-48fe-866d-397b4628bcd6-horizon-secret-key\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.613539 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7127829b-b2bf-48fe-866d-397b4628bcd6-logs\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.614084 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7127829b-b2bf-48fe-866d-397b4628bcd6-logs\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.614721 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-scripts\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.620256 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-config-data\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.626855 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7127829b-b2bf-48fe-866d-397b4628bcd6-horizon-secret-key\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.632572 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8c7z\" (UniqueName: \"kubernetes.io/projected/7127829b-b2bf-48fe-866d-397b4628bcd6-kube-api-access-s8c7z\") pod \"horizon-98d9954b9-rg54m\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:57 crc kubenswrapper[4940]: I1126 08:54:57.748301 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:54:58 crc kubenswrapper[4940]: I1126 08:54:58.198240 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-98d9954b9-rg54m"] Nov 26 08:54:58 crc kubenswrapper[4940]: W1126 08:54:58.207519 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7127829b_b2bf_48fe_866d_397b4628bcd6.slice/crio-afab43604b722a710a0d915a27926e6e18d3481e2e4e254726e1d9ace47b81c9 WatchSource:0}: Error finding container afab43604b722a710a0d915a27926e6e18d3481e2e4e254726e1d9ace47b81c9: Status 404 returned error can't find the container with id afab43604b722a710a0d915a27926e6e18d3481e2e4e254726e1d9ace47b81c9 Nov 26 08:54:58 crc kubenswrapper[4940]: I1126 08:54:58.328349 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-98d9954b9-rg54m" event={"ID":"7127829b-b2bf-48fe-866d-397b4628bcd6","Type":"ContainerStarted","Data":"afab43604b722a710a0d915a27926e6e18d3481e2e4e254726e1d9ace47b81c9"} Nov 26 08:54:58 crc kubenswrapper[4940]: I1126 08:54:58.329746 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5958ff99bf-nxb6d" event={"ID":"2c035194-5527-4870-8500-e1e64ce38920","Type":"ContainerStarted","Data":"e9fdf175b225c249ade35c7fd95c56a16549903fd8aaeab9ca4228bfee4c4a83"} Nov 26 08:54:58 crc kubenswrapper[4940]: I1126 08:54:58.330970 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f79b977b9-wvzvc" event={"ID":"6e4d2d07-fbd3-4557-948d-5719310bf1cd","Type":"ContainerStarted","Data":"c70ba35d6a903567255bc004d0bb5ccbaff5f2697b49e45f134d81e211b4f863"} Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.353572 4940 generic.go:334] "Generic (PLEG): container finished" podID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerID="db8e92b7ef4d74dc42bc30a9e5d43e2e555e54e1c654e038cd2a7bcd1a001db5" exitCode=0 Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.353616 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db","Type":"ContainerDied","Data":"db8e92b7ef4d74dc42bc30a9e5d43e2e555e54e1c654e038cd2a7bcd1a001db5"} Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.354305 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db","Type":"ContainerDied","Data":"10706840b6c68785e82ab326bfff9027a0e0e8d1eb53d24927d18f613a4d4722"} Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.354328 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10706840b6c68785e82ab326bfff9027a0e0e8d1eb53d24927d18f613a4d4722" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.356566 4940 generic.go:334] "Generic (PLEG): container finished" podID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerID="a64b289b03cd3b7766145b3a34cb3b7a8dbb600e3ea64d6ad45822a6ac77cced" exitCode=0 Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.356607 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d0092e5e-b891-449c-a390-ad9741a14b8f","Type":"ContainerDied","Data":"a64b289b03cd3b7766145b3a34cb3b7a8dbb600e3ea64d6ad45822a6ac77cced"} Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.424787 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.430691 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.572172 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-httpd-run\") pod \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.572264 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-scripts\") pod \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.572329 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5d6f4\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-kube-api-access-5d6f4\") pod \"d0092e5e-b891-449c-a390-ad9741a14b8f\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.572381 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-config-data\") pod \"d0092e5e-b891-449c-a390-ad9741a14b8f\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.572446 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-httpd-run\") pod \"d0092e5e-b891-449c-a390-ad9741a14b8f\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.572727 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" (UID: "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573071 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d0092e5e-b891-449c-a390-ad9741a14b8f" (UID: "d0092e5e-b891-449c-a390-ad9741a14b8f"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573217 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-combined-ca-bundle\") pod \"d0092e5e-b891-449c-a390-ad9741a14b8f\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573266 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-combined-ca-bundle\") pod \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573297 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-logs\") pod \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573338 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-logs\") pod \"d0092e5e-b891-449c-a390-ad9741a14b8f\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573369 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn2kv\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-kube-api-access-xn2kv\") pod \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573400 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-config-data\") pod \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573453 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-ceph\") pod \"d0092e5e-b891-449c-a390-ad9741a14b8f\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573480 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-scripts\") pod \"d0092e5e-b891-449c-a390-ad9741a14b8f\" (UID: \"d0092e5e-b891-449c-a390-ad9741a14b8f\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573508 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-ceph\") pod \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\" (UID: \"bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db\") " Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.573793 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-logs" (OuterVolumeSpecName: "logs") pod "d0092e5e-b891-449c-a390-ad9741a14b8f" (UID: "d0092e5e-b891-449c-a390-ad9741a14b8f"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.574399 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.574421 4940 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.574432 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0092e5e-b891-449c-a390-ad9741a14b8f-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.578027 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-scripts" (OuterVolumeSpecName: "scripts") pod "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" (UID: "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.578191 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-logs" (OuterVolumeSpecName: "logs") pod "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" (UID: "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.581414 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-ceph" (OuterVolumeSpecName: "ceph") pod "d0092e5e-b891-449c-a390-ad9741a14b8f" (UID: "d0092e5e-b891-449c-a390-ad9741a14b8f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.581500 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-kube-api-access-xn2kv" (OuterVolumeSpecName: "kube-api-access-xn2kv") pod "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" (UID: "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db"). InnerVolumeSpecName "kube-api-access-xn2kv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.581690 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-scripts" (OuterVolumeSpecName: "scripts") pod "d0092e5e-b891-449c-a390-ad9741a14b8f" (UID: "d0092e5e-b891-449c-a390-ad9741a14b8f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.581983 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-kube-api-access-5d6f4" (OuterVolumeSpecName: "kube-api-access-5d6f4") pod "d0092e5e-b891-449c-a390-ad9741a14b8f" (UID: "d0092e5e-b891-449c-a390-ad9741a14b8f"). InnerVolumeSpecName "kube-api-access-5d6f4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.591735 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-ceph" (OuterVolumeSpecName: "ceph") pod "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" (UID: "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.625270 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" (UID: "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.635533 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-config-data" (OuterVolumeSpecName: "config-data") pod "d0092e5e-b891-449c-a390-ad9741a14b8f" (UID: "d0092e5e-b891-449c-a390-ad9741a14b8f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.640723 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-config-data" (OuterVolumeSpecName: "config-data") pod "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" (UID: "bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.659612 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0092e5e-b891-449c-a390-ad9741a14b8f" (UID: "d0092e5e-b891-449c-a390-ad9741a14b8f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676867 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5d6f4\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-kube-api-access-5d6f4\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676903 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676915 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676924 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676935 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676945 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn2kv\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-kube-api-access-xn2kv\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676954 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676966 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d0092e5e-b891-449c-a390-ad9741a14b8f-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676974 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0092e5e-b891-449c-a390-ad9741a14b8f-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676981 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:00 crc kubenswrapper[4940]: I1126 08:55:00.676990 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.370699 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d0092e5e-b891-449c-a390-ad9741a14b8f","Type":"ContainerDied","Data":"7987b7bedffebc61ea05308e149acadbdcc2e537d1f0eb5b97f6f9dbfcf74ac7"} Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.371410 4940 scope.go:117] "RemoveContainer" containerID="a64b289b03cd3b7766145b3a34cb3b7a8dbb600e3ea64d6ad45822a6ac77cced" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.370724 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.370724 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.418005 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.439728 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.453794 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.466311 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.476903 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:55:01 crc kubenswrapper[4940]: E1126 08:55:01.477346 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerName="glance-httpd" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.477363 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerName="glance-httpd" Nov 26 08:55:01 crc kubenswrapper[4940]: E1126 08:55:01.477379 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerName="glance-log" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.477386 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerName="glance-log" Nov 26 08:55:01 crc kubenswrapper[4940]: E1126 08:55:01.477405 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerName="glance-log" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.477411 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerName="glance-log" Nov 26 08:55:01 crc kubenswrapper[4940]: E1126 08:55:01.477428 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerName="glance-httpd" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.477433 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerName="glance-httpd" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.477619 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerName="glance-log" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.477641 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" containerName="glance-httpd" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.477671 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerName="glance-log" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.477692 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" containerName="glance-httpd" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.478718 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.482319 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.482361 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6cn6v" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.482361 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.485618 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.494095 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.495983 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.497847 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.503616 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599471 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-logs\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599780 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599811 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599867 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e9d6f60-4098-4e31-9153-d48155c79752-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599889 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599912 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599932 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-ceph\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599957 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxlwz\" (UniqueName: \"kubernetes.io/projected/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-kube-api-access-jxlwz\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599972 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.599993 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.600027 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e9d6f60-4098-4e31-9153-d48155c79752-logs\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.600061 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.600091 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdk7s\" (UniqueName: \"kubernetes.io/projected/5e9d6f60-4098-4e31-9153-d48155c79752-kube-api-access-kdk7s\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.600109 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e9d6f60-4098-4e31-9153-d48155c79752-ceph\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc 
kubenswrapper[4940]: I1126 08:55:01.702094 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.702171 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e9d6f60-4098-4e31-9153-d48155c79752-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.702197 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.702236 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.702268 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-ceph\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.702327 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxlwz\" (UniqueName: \"kubernetes.io/projected/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-kube-api-access-jxlwz\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.702350 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.703568 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.703712 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e9d6f60-4098-4e31-9153-d48155c79752-logs\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.703733 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.703065 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e9d6f60-4098-4e31-9153-d48155c79752-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.703785 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdk7s\" (UniqueName: \"kubernetes.io/projected/5e9d6f60-4098-4e31-9153-d48155c79752-kube-api-access-kdk7s\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.704152 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e9d6f60-4098-4e31-9153-d48155c79752-ceph\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.704319 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-logs\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.704375 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.708901 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.709802 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.710626 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-ceph\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.711135 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.713860 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.714714 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-logs\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.715927 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e9d6f60-4098-4e31-9153-d48155c79752-logs\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.716653 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5e9d6f60-4098-4e31-9153-d48155c79752-ceph\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.717010 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-config-data\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.717726 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.718492 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e9d6f60-4098-4e31-9153-d48155c79752-scripts\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.719884 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdk7s\" (UniqueName: \"kubernetes.io/projected/5e9d6f60-4098-4e31-9153-d48155c79752-kube-api-access-kdk7s\") pod \"glance-default-external-api-0\" (UID: \"5e9d6f60-4098-4e31-9153-d48155c79752\") " pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.719922 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxlwz\" (UniqueName: \"kubernetes.io/projected/3b35d195-bf22-45af-b0d6-0f21bf3d5a67-kube-api-access-jxlwz\") pod \"glance-default-internal-api-0\" (UID: \"3b35d195-bf22-45af-b0d6-0f21bf3d5a67\") " pod="openstack/glance-default-internal-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 
08:55:01.810512 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 08:55:01 crc kubenswrapper[4940]: I1126 08:55:01.823983 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:03 crc kubenswrapper[4940]: I1126 08:55:03.177604 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db" path="/var/lib/kubelet/pods/bb4222c7-f443-4ac9-8b3a-1bc9ccfe03db/volumes" Nov 26 08:55:03 crc kubenswrapper[4940]: I1126 08:55:03.179467 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0092e5e-b891-449c-a390-ad9741a14b8f" path="/var/lib/kubelet/pods/d0092e5e-b891-449c-a390-ad9741a14b8f/volumes" Nov 26 08:55:05 crc kubenswrapper[4940]: I1126 08:55:05.726057 4940 scope.go:117] "RemoveContainer" containerID="d881374d257f417e1c4f533832cbdd4450b2c7c56acd2a6f0774f3372b5980cd" Nov 26 08:55:06 crc kubenswrapper[4940]: I1126 08:55:06.419338 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5958ff99bf-nxb6d" event={"ID":"2c035194-5527-4870-8500-e1e64ce38920","Type":"ContainerStarted","Data":"daa6382c7ebe0454481b1e45d4bfeb458e2a01c2ce104c8fae8da75f92c1b2ab"} Nov 26 08:55:06 crc kubenswrapper[4940]: I1126 08:55:06.422540 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f79b977b9-wvzvc" event={"ID":"6e4d2d07-fbd3-4557-948d-5719310bf1cd","Type":"ContainerStarted","Data":"e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796"} Nov 26 08:55:06 crc kubenswrapper[4940]: I1126 08:55:06.425326 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 08:55:06 crc kubenswrapper[4940]: I1126 08:55:06.429797 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-98d9954b9-rg54m" event={"ID":"7127829b-b2bf-48fe-866d-397b4628bcd6","Type":"ContainerStarted","Data":"c1176880b242fbf7ef76c3c0ec0f46f8f63237c361defc3801232ee37dc8f07f"} Nov 26 08:55:06 crc kubenswrapper[4940]: I1126 08:55:06.979425 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.444504 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e9d6f60-4098-4e31-9153-d48155c79752","Type":"ContainerStarted","Data":"cb8d159cfe04169b3687cbfcdeb3195f860d3821c437b6c7147bc8b69afe2e58"} Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.444880 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e9d6f60-4098-4e31-9153-d48155c79752","Type":"ContainerStarted","Data":"d949b9a743b8043efc21681eac9190f57bb4bcca2590e71d119fcacb7f1cf9f9"} Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.448622 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f79b977b9-wvzvc" event={"ID":"6e4d2d07-fbd3-4557-948d-5719310bf1cd","Type":"ContainerStarted","Data":"4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78"} Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.451205 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b35d195-bf22-45af-b0d6-0f21bf3d5a67","Type":"ContainerStarted","Data":"ccec46b45273da454d8df2adb05c9caac0cdcda3adc4b152b6afb634d5c4b9d1"} Nov 26 08:55:07 crc 
kubenswrapper[4940]: I1126 08:55:07.453581 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-98d9954b9-rg54m" event={"ID":"7127829b-b2bf-48fe-866d-397b4628bcd6","Type":"ContainerStarted","Data":"d7a351c5bebb5c14a7dea54674691acc71e2bf2e2297cb58c110286f3cc09eb5"} Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.478227 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6f79b977b9-wvzvc" podStartSLOduration=3.016977415 podStartE2EDuration="11.478199904s" podCreationTimestamp="2025-11-26 08:54:56 +0000 UTC" firstStartedPulling="2025-11-26 08:54:57.344238796 +0000 UTC m=+7198.864380415" lastFinishedPulling="2025-11-26 08:55:05.805461285 +0000 UTC m=+7207.325602904" observedRunningTime="2025-11-26 08:55:07.473289677 +0000 UTC m=+7208.993431316" watchObservedRunningTime="2025-11-26 08:55:07.478199904 +0000 UTC m=+7208.998341523" Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.489609 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5958ff99bf-nxb6d" event={"ID":"2c035194-5527-4870-8500-e1e64ce38920","Type":"ContainerStarted","Data":"9393fbcfa6038ef6958028151c97bbbdd6fbf2575bbd514830f939f95f94ad7f"} Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.489796 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5958ff99bf-nxb6d" podUID="2c035194-5527-4870-8500-e1e64ce38920" containerName="horizon-log" containerID="cri-o://daa6382c7ebe0454481b1e45d4bfeb458e2a01c2ce104c8fae8da75f92c1b2ab" gracePeriod=30 Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.489923 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5958ff99bf-nxb6d" podUID="2c035194-5527-4870-8500-e1e64ce38920" containerName="horizon" containerID="cri-o://9393fbcfa6038ef6958028151c97bbbdd6fbf2575bbd514830f939f95f94ad7f" gracePeriod=30 Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.507690 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-98d9954b9-rg54m" podStartSLOduration=2.946674656 podStartE2EDuration="10.507666252s" podCreationTimestamp="2025-11-26 08:54:57 +0000 UTC" firstStartedPulling="2025-11-26 08:54:58.211068525 +0000 UTC m=+7199.731210144" lastFinishedPulling="2025-11-26 08:55:05.772060121 +0000 UTC m=+7207.292201740" observedRunningTime="2025-11-26 08:55:07.497371241 +0000 UTC m=+7209.017512880" watchObservedRunningTime="2025-11-26 08:55:07.507666252 +0000 UTC m=+7209.027807881" Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.520751 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5958ff99bf-nxb6d" podStartSLOduration=3.104452769 podStartE2EDuration="11.520728562s" podCreationTimestamp="2025-11-26 08:54:56 +0000 UTC" firstStartedPulling="2025-11-26 08:54:57.351301093 +0000 UTC m=+7198.871442712" lastFinishedPulling="2025-11-26 08:55:05.767576896 +0000 UTC m=+7207.287718505" observedRunningTime="2025-11-26 08:55:07.517503878 +0000 UTC m=+7209.037645497" watchObservedRunningTime="2025-11-26 08:55:07.520728562 +0000 UTC m=+7209.040870181" Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.748930 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:55:07 crc kubenswrapper[4940]: I1126 08:55:07.749238 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:55:08 crc 
kubenswrapper[4940]: I1126 08:55:08.501281 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5e9d6f60-4098-4e31-9153-d48155c79752","Type":"ContainerStarted","Data":"d61c6d6d55cf385dccb9106da1b582aa8cdbe5ae0f59250a5f7b762df63b4810"} Nov 26 08:55:08 crc kubenswrapper[4940]: I1126 08:55:08.506658 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b35d195-bf22-45af-b0d6-0f21bf3d5a67","Type":"ContainerStarted","Data":"577929f3e542d41486af98e6a5843cbd8cfb9e2e8a00081d63c8998cf7fc4a8f"} Nov 26 08:55:08 crc kubenswrapper[4940]: I1126 08:55:08.506700 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b35d195-bf22-45af-b0d6-0f21bf3d5a67","Type":"ContainerStarted","Data":"56a0c7c3af09a28420c42e02ff23f00eb36910c90e86addabff01a9db510f46c"} Nov 26 08:55:08 crc kubenswrapper[4940]: I1126 08:55:08.543191 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.543169443 podStartE2EDuration="7.543169443s" podCreationTimestamp="2025-11-26 08:55:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:55:08.52815852 +0000 UTC m=+7210.048300179" watchObservedRunningTime="2025-11-26 08:55:08.543169443 +0000 UTC m=+7210.063311072" Nov 26 08:55:08 crc kubenswrapper[4940]: I1126 08:55:08.555150 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.555125967 podStartE2EDuration="7.555125967s" podCreationTimestamp="2025-11-26 08:55:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:55:08.553292237 +0000 UTC m=+7210.073433886" watchObservedRunningTime="2025-11-26 08:55:08.555125967 +0000 UTC m=+7210.075267606" Nov 26 08:55:11 crc kubenswrapper[4940]: I1126 08:55:11.811581 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 08:55:11 crc kubenswrapper[4940]: I1126 08:55:11.812190 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 08:55:11 crc kubenswrapper[4940]: I1126 08:55:11.825444 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:11 crc kubenswrapper[4940]: I1126 08:55:11.825525 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:11 crc kubenswrapper[4940]: I1126 08:55:11.847034 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 08:55:11 crc kubenswrapper[4940]: I1126 08:55:11.865982 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 08:55:11 crc kubenswrapper[4940]: I1126 08:55:11.878405 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:11 crc kubenswrapper[4940]: I1126 08:55:11.878507 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 
08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.548746 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.549270 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.549373 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.549438 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.591629 4940 scope.go:117] "RemoveContainer" containerID="98aba1818861fa16b1e7c591993d05df210a7668e1b1be3633c1d1bd43bb3db9" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.634847 4940 scope.go:117] "RemoveContainer" containerID="9a55afa30dc65eaf4ea89064161eaf2a9e23de2c8a5fa2d263a7d85761cb82cf" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.727262 4940 scope.go:117] "RemoveContainer" containerID="4d0dac25a3a7420857bb6f1c73f51ac9104242139c009d040c9920f8a02ead4e" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.758719 4940 scope.go:117] "RemoveContainer" containerID="a42817c884d0ddd6bf93f456cc58add16eddbdef7698ddaf401989ed8ef00fe7" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.785434 4940 scope.go:117] "RemoveContainer" containerID="db8e92b7ef4d74dc42bc30a9e5d43e2e555e54e1c654e038cd2a7bcd1a001db5" Nov 26 08:55:12 crc kubenswrapper[4940]: I1126 08:55:12.809029 4940 scope.go:117] "RemoveContainer" containerID="adc75a3f5391ca2b9acacb2f6731d0c4dce3154523346fb959c2b50ada331a2a" Nov 26 08:55:14 crc kubenswrapper[4940]: I1126 08:55:14.567175 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 08:55:14 crc kubenswrapper[4940]: I1126 08:55:14.568483 4940 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 08:55:14 crc kubenswrapper[4940]: I1126 08:55:14.627074 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:14 crc kubenswrapper[4940]: I1126 08:55:14.695617 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 08:55:14 crc kubenswrapper[4940]: I1126 08:55:14.898586 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 08:55:15 crc kubenswrapper[4940]: I1126 08:55:15.593430 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 08:55:16 crc kubenswrapper[4940]: I1126 08:55:16.866780 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:55:17 crc kubenswrapper[4940]: I1126 08:55:17.061954 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:55:17 crc kubenswrapper[4940]: I1126 08:55:17.062287 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:55:17 crc kubenswrapper[4940]: I1126 08:55:17.064098 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6f79b977b9-wvzvc" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon" 
probeResult="failure" output="Get \"http://10.217.1.136:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.136:8080: connect: connection refused" Nov 26 08:55:17 crc kubenswrapper[4940]: I1126 08:55:17.751079 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-98d9954b9-rg54m" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.137:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.137:8080: connect: connection refused" Nov 26 08:55:28 crc kubenswrapper[4940]: I1126 08:55:28.960812 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:55:29 crc kubenswrapper[4940]: I1126 08:55:29.603337 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:55:30 crc kubenswrapper[4940]: I1126 08:55:30.671270 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:55:31 crc kubenswrapper[4940]: I1126 08:55:31.388583 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:55:31 crc kubenswrapper[4940]: I1126 08:55:31.500799 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6f79b977b9-wvzvc"] Nov 26 08:55:31 crc kubenswrapper[4940]: I1126 08:55:31.501468 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6f79b977b9-wvzvc" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon-log" containerID="cri-o://e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796" gracePeriod=30 Nov 26 08:55:31 crc kubenswrapper[4940]: I1126 08:55:31.501694 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6f79b977b9-wvzvc" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon" containerID="cri-o://4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78" gracePeriod=30 Nov 26 08:55:35 crc kubenswrapper[4940]: I1126 08:55:35.797522 4940 generic.go:334] "Generic (PLEG): container finished" podID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerID="4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78" exitCode=0 Nov 26 08:55:35 crc kubenswrapper[4940]: I1126 08:55:35.797646 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f79b977b9-wvzvc" event={"ID":"6e4d2d07-fbd3-4557-948d-5719310bf1cd","Type":"ContainerDied","Data":"4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78"} Nov 26 08:55:37 crc kubenswrapper[4940]: I1126 08:55:37.062239 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6f79b977b9-wvzvc" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.136:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.136:8080: connect: connection refused" Nov 26 08:55:37 crc kubenswrapper[4940]: I1126 08:55:37.822616 4940 generic.go:334] "Generic (PLEG): container finished" podID="2c035194-5527-4870-8500-e1e64ce38920" containerID="9393fbcfa6038ef6958028151c97bbbdd6fbf2575bbd514830f939f95f94ad7f" exitCode=137 Nov 26 08:55:37 crc kubenswrapper[4940]: I1126 08:55:37.822908 4940 generic.go:334] "Generic (PLEG): container finished" podID="2c035194-5527-4870-8500-e1e64ce38920" 
containerID="daa6382c7ebe0454481b1e45d4bfeb458e2a01c2ce104c8fae8da75f92c1b2ab" exitCode=137 Nov 26 08:55:37 crc kubenswrapper[4940]: I1126 08:55:37.822929 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5958ff99bf-nxb6d" event={"ID":"2c035194-5527-4870-8500-e1e64ce38920","Type":"ContainerDied","Data":"9393fbcfa6038ef6958028151c97bbbdd6fbf2575bbd514830f939f95f94ad7f"} Nov 26 08:55:37 crc kubenswrapper[4940]: I1126 08:55:37.822952 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5958ff99bf-nxb6d" event={"ID":"2c035194-5527-4870-8500-e1e64ce38920","Type":"ContainerDied","Data":"daa6382c7ebe0454481b1e45d4bfeb458e2a01c2ce104c8fae8da75f92c1b2ab"} Nov 26 08:55:37 crc kubenswrapper[4940]: I1126 08:55:37.978962 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.079208 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2c035194-5527-4870-8500-e1e64ce38920-horizon-secret-key\") pod \"2c035194-5527-4870-8500-e1e64ce38920\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.079307 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-config-data\") pod \"2c035194-5527-4870-8500-e1e64ce38920\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.079359 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-np4h2\" (UniqueName: \"kubernetes.io/projected/2c035194-5527-4870-8500-e1e64ce38920-kube-api-access-np4h2\") pod \"2c035194-5527-4870-8500-e1e64ce38920\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.079507 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c035194-5527-4870-8500-e1e64ce38920-logs\") pod \"2c035194-5527-4870-8500-e1e64ce38920\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.079613 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-scripts\") pod \"2c035194-5527-4870-8500-e1e64ce38920\" (UID: \"2c035194-5527-4870-8500-e1e64ce38920\") " Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.080387 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c035194-5527-4870-8500-e1e64ce38920-logs" (OuterVolumeSpecName: "logs") pod "2c035194-5527-4870-8500-e1e64ce38920" (UID: "2c035194-5527-4870-8500-e1e64ce38920"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.085234 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c035194-5527-4870-8500-e1e64ce38920-kube-api-access-np4h2" (OuterVolumeSpecName: "kube-api-access-np4h2") pod "2c035194-5527-4870-8500-e1e64ce38920" (UID: "2c035194-5527-4870-8500-e1e64ce38920"). InnerVolumeSpecName "kube-api-access-np4h2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.085805 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c035194-5527-4870-8500-e1e64ce38920-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "2c035194-5527-4870-8500-e1e64ce38920" (UID: "2c035194-5527-4870-8500-e1e64ce38920"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.111487 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-scripts" (OuterVolumeSpecName: "scripts") pod "2c035194-5527-4870-8500-e1e64ce38920" (UID: "2c035194-5527-4870-8500-e1e64ce38920"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.119030 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-config-data" (OuterVolumeSpecName: "config-data") pod "2c035194-5527-4870-8500-e1e64ce38920" (UID: "2c035194-5527-4870-8500-e1e64ce38920"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.181821 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.182233 4940 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2c035194-5527-4870-8500-e1e64ce38920-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.182270 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c035194-5527-4870-8500-e1e64ce38920-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.182280 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-np4h2\" (UniqueName: \"kubernetes.io/projected/2c035194-5527-4870-8500-e1e64ce38920-kube-api-access-np4h2\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.182290 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c035194-5527-4870-8500-e1e64ce38920-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.837500 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5958ff99bf-nxb6d" event={"ID":"2c035194-5527-4870-8500-e1e64ce38920","Type":"ContainerDied","Data":"e9fdf175b225c249ade35c7fd95c56a16549903fd8aaeab9ca4228bfee4c4a83"} Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.837558 4940 scope.go:117] "RemoveContainer" containerID="9393fbcfa6038ef6958028151c97bbbdd6fbf2575bbd514830f939f95f94ad7f" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.837715 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5958ff99bf-nxb6d" Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.921698 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5958ff99bf-nxb6d"] Nov 26 08:55:38 crc kubenswrapper[4940]: I1126 08:55:38.932624 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5958ff99bf-nxb6d"] Nov 26 08:55:39 crc kubenswrapper[4940]: I1126 08:55:39.064684 4940 scope.go:117] "RemoveContainer" containerID="daa6382c7ebe0454481b1e45d4bfeb458e2a01c2ce104c8fae8da75f92c1b2ab" Nov 26 08:55:39 crc kubenswrapper[4940]: I1126 08:55:39.184782 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c035194-5527-4870-8500-e1e64ce38920" path="/var/lib/kubelet/pods/2c035194-5527-4870-8500-e1e64ce38920/volumes" Nov 26 08:55:47 crc kubenswrapper[4940]: I1126 08:55:47.062577 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6f79b977b9-wvzvc" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.136:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.136:8080: connect: connection refused" Nov 26 08:55:57 crc kubenswrapper[4940]: I1126 08:55:57.063187 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6f79b977b9-wvzvc" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.136:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.136:8080: connect: connection refused" Nov 26 08:55:57 crc kubenswrapper[4940]: I1126 08:55:57.063732 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:56:01 crc kubenswrapper[4940]: I1126 08:56:01.919364 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.072493 4940 generic.go:334] "Generic (PLEG): container finished" podID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerID="e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796" exitCode=137 Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.072534 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f79b977b9-wvzvc" event={"ID":"6e4d2d07-fbd3-4557-948d-5719310bf1cd","Type":"ContainerDied","Data":"e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796"} Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.072559 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6f79b977b9-wvzvc" event={"ID":"6e4d2d07-fbd3-4557-948d-5719310bf1cd","Type":"ContainerDied","Data":"c70ba35d6a903567255bc004d0bb5ccbaff5f2697b49e45f134d81e211b4f863"} Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.072569 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6f79b977b9-wvzvc" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.072577 4940 scope.go:117] "RemoveContainer" containerID="4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.080598 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-config-data\") pod \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.080707 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-scripts\") pod \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.080738 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbxkj\" (UniqueName: \"kubernetes.io/projected/6e4d2d07-fbd3-4557-948d-5719310bf1cd-kube-api-access-qbxkj\") pod \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.080854 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6e4d2d07-fbd3-4557-948d-5719310bf1cd-horizon-secret-key\") pod \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.080928 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e4d2d07-fbd3-4557-948d-5719310bf1cd-logs\") pod \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\" (UID: \"6e4d2d07-fbd3-4557-948d-5719310bf1cd\") " Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.081431 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e4d2d07-fbd3-4557-948d-5719310bf1cd-logs" (OuterVolumeSpecName: "logs") pod "6e4d2d07-fbd3-4557-948d-5719310bf1cd" (UID: "6e4d2d07-fbd3-4557-948d-5719310bf1cd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.086665 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e4d2d07-fbd3-4557-948d-5719310bf1cd-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "6e4d2d07-fbd3-4557-948d-5719310bf1cd" (UID: "6e4d2d07-fbd3-4557-948d-5719310bf1cd"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.086682 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e4d2d07-fbd3-4557-948d-5719310bf1cd-kube-api-access-qbxkj" (OuterVolumeSpecName: "kube-api-access-qbxkj") pod "6e4d2d07-fbd3-4557-948d-5719310bf1cd" (UID: "6e4d2d07-fbd3-4557-948d-5719310bf1cd"). InnerVolumeSpecName "kube-api-access-qbxkj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.105287 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-scripts" (OuterVolumeSpecName: "scripts") pod "6e4d2d07-fbd3-4557-948d-5719310bf1cd" (UID: "6e4d2d07-fbd3-4557-948d-5719310bf1cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.116108 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-config-data" (OuterVolumeSpecName: "config-data") pod "6e4d2d07-fbd3-4557-948d-5719310bf1cd" (UID: "6e4d2d07-fbd3-4557-948d-5719310bf1cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.183348 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.183391 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e4d2d07-fbd3-4557-948d-5719310bf1cd-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.183407 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbxkj\" (UniqueName: \"kubernetes.io/projected/6e4d2d07-fbd3-4557-948d-5719310bf1cd-kube-api-access-qbxkj\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.183423 4940 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6e4d2d07-fbd3-4557-948d-5719310bf1cd-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.183435 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e4d2d07-fbd3-4557-948d-5719310bf1cd-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.239709 4940 scope.go:117] "RemoveContainer" containerID="e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.265512 4940 scope.go:117] "RemoveContainer" containerID="4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78" Nov 26 08:56:02 crc kubenswrapper[4940]: E1126 08:56:02.266073 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78\": container with ID starting with 4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78 not found: ID does not exist" containerID="4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.266105 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78"} err="failed to get container status \"4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78\": rpc error: code = NotFound desc = could not find container \"4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78\": container with ID starting with 
4e7cd44281137d53407d7d1cccbf4a98709d4c62f73d73a57387dce2f8454a78 not found: ID does not exist" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.266127 4940 scope.go:117] "RemoveContainer" containerID="e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796" Nov 26 08:56:02 crc kubenswrapper[4940]: E1126 08:56:02.266403 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796\": container with ID starting with e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796 not found: ID does not exist" containerID="e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.266424 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796"} err="failed to get container status \"e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796\": rpc error: code = NotFound desc = could not find container \"e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796\": container with ID starting with e2fd8db52444c3ae4fb949d71a9fa5bd7ee63ca6bd7bf9c2b1ccdf8bb6866796 not found: ID does not exist" Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.421205 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6f79b977b9-wvzvc"] Nov 26 08:56:02 crc kubenswrapper[4940]: I1126 08:56:02.434943 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6f79b977b9-wvzvc"] Nov 26 08:56:03 crc kubenswrapper[4940]: I1126 08:56:03.179473 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" path="/var/lib/kubelet/pods/6e4d2d07-fbd3-4557-948d-5719310bf1cd/volumes" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.649719 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6575b86469-nmbzf"] Nov 26 08:56:13 crc kubenswrapper[4940]: E1126 08:56:13.650802 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.650819 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon" Nov 26 08:56:13 crc kubenswrapper[4940]: E1126 08:56:13.650850 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c035194-5527-4870-8500-e1e64ce38920" containerName="horizon-log" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.650857 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c035194-5527-4870-8500-e1e64ce38920" containerName="horizon-log" Nov 26 08:56:13 crc kubenswrapper[4940]: E1126 08:56:13.650881 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon-log" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.650888 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon-log" Nov 26 08:56:13 crc kubenswrapper[4940]: E1126 08:56:13.650901 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c035194-5527-4870-8500-e1e64ce38920" containerName="horizon" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.650907 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2c035194-5527-4870-8500-e1e64ce38920" containerName="horizon" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.651134 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon-log" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.651161 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c035194-5527-4870-8500-e1e64ce38920" containerName="horizon-log" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.651182 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c035194-5527-4870-8500-e1e64ce38920" containerName="horizon" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.651197 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e4d2d07-fbd3-4557-948d-5719310bf1cd" containerName="horizon" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.652471 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.661710 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6575b86469-nmbzf"] Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.807954 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqzxc\" (UniqueName: \"kubernetes.io/projected/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-kube-api-access-dqzxc\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.807997 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-horizon-secret-key\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.808025 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-config-data\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.808066 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-scripts\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.808222 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-logs\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.910264 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqzxc\" (UniqueName: \"kubernetes.io/projected/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-kube-api-access-dqzxc\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " 
pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.910315 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-horizon-secret-key\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.910349 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-config-data\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.910374 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-scripts\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.910421 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-logs\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.911493 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-scripts\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.911770 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-config-data\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.911979 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-logs\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.919016 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-horizon-secret-key\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.974920 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqzxc\" (UniqueName: \"kubernetes.io/projected/a2a5d0ad-50c9-42da-9978-51fe890fd3c4-kube-api-access-dqzxc\") pod \"horizon-6575b86469-nmbzf\" (UID: \"a2a5d0ad-50c9-42da-9978-51fe890fd3c4\") " pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:13 crc kubenswrapper[4940]: I1126 08:56:13.976699 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:14 crc kubenswrapper[4940]: I1126 08:56:14.569805 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6575b86469-nmbzf"] Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.049105 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-8jw7v"] Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.050557 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.057772 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-f7f7-account-create-update-7xqmf"] Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.059419 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.062929 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.068332 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-8jw7v"] Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.100854 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-f7f7-account-create-update-7xqmf"] Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.139474 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bfb71163-cff0-41a1-8400-cf97b444d624-operator-scripts\") pod \"heat-db-create-8jw7v\" (UID: \"bfb71163-cff0-41a1-8400-cf97b444d624\") " pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.139623 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2vnm\" (UniqueName: \"kubernetes.io/projected/bfb71163-cff0-41a1-8400-cf97b444d624-kube-api-access-n2vnm\") pod \"heat-db-create-8jw7v\" (UID: \"bfb71163-cff0-41a1-8400-cf97b444d624\") " pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.237696 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6575b86469-nmbzf" event={"ID":"a2a5d0ad-50c9-42da-9978-51fe890fd3c4","Type":"ContainerStarted","Data":"d99c9ebecaae39fcb322b4f7583f0962c316b646dbb902c9960fe20be6d18301"} Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.237772 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6575b86469-nmbzf" event={"ID":"a2a5d0ad-50c9-42da-9978-51fe890fd3c4","Type":"ContainerStarted","Data":"1770fb99835824b1c360c498fed8867f8a7e0861f79ae82e9d3894a1aff8c005"} Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.237792 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6575b86469-nmbzf" event={"ID":"a2a5d0ad-50c9-42da-9978-51fe890fd3c4","Type":"ContainerStarted","Data":"cc71eb249b6eef611b30815b77bf43e70497d5519860228008fb3e6cf954683a"} Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.240699 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch567\" (UniqueName: \"kubernetes.io/projected/eb422d31-3977-4c63-87f8-a90c62bc00f1-kube-api-access-ch567\") pod \"heat-f7f7-account-create-update-7xqmf\" (UID: \"eb422d31-3977-4c63-87f8-a90c62bc00f1\") " 
pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.240759 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2vnm\" (UniqueName: \"kubernetes.io/projected/bfb71163-cff0-41a1-8400-cf97b444d624-kube-api-access-n2vnm\") pod \"heat-db-create-8jw7v\" (UID: \"bfb71163-cff0-41a1-8400-cf97b444d624\") " pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.240955 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bfb71163-cff0-41a1-8400-cf97b444d624-operator-scripts\") pod \"heat-db-create-8jw7v\" (UID: \"bfb71163-cff0-41a1-8400-cf97b444d624\") " pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.241019 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb422d31-3977-4c63-87f8-a90c62bc00f1-operator-scripts\") pod \"heat-f7f7-account-create-update-7xqmf\" (UID: \"eb422d31-3977-4c63-87f8-a90c62bc00f1\") " pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.241934 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bfb71163-cff0-41a1-8400-cf97b444d624-operator-scripts\") pod \"heat-db-create-8jw7v\" (UID: \"bfb71163-cff0-41a1-8400-cf97b444d624\") " pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.265108 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6575b86469-nmbzf" podStartSLOduration=2.265072131 podStartE2EDuration="2.265072131s" podCreationTimestamp="2025-11-26 08:56:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:56:15.253598052 +0000 UTC m=+7276.773739671" watchObservedRunningTime="2025-11-26 08:56:15.265072131 +0000 UTC m=+7276.785213750" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.267100 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2vnm\" (UniqueName: \"kubernetes.io/projected/bfb71163-cff0-41a1-8400-cf97b444d624-kube-api-access-n2vnm\") pod \"heat-db-create-8jw7v\" (UID: \"bfb71163-cff0-41a1-8400-cf97b444d624\") " pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.342471 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch567\" (UniqueName: \"kubernetes.io/projected/eb422d31-3977-4c63-87f8-a90c62bc00f1-kube-api-access-ch567\") pod \"heat-f7f7-account-create-update-7xqmf\" (UID: \"eb422d31-3977-4c63-87f8-a90c62bc00f1\") " pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.343106 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb422d31-3977-4c63-87f8-a90c62bc00f1-operator-scripts\") pod \"heat-f7f7-account-create-update-7xqmf\" (UID: \"eb422d31-3977-4c63-87f8-a90c62bc00f1\") " pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.346120 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/eb422d31-3977-4c63-87f8-a90c62bc00f1-operator-scripts\") pod \"heat-f7f7-account-create-update-7xqmf\" (UID: \"eb422d31-3977-4c63-87f8-a90c62bc00f1\") " pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.360802 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch567\" (UniqueName: \"kubernetes.io/projected/eb422d31-3977-4c63-87f8-a90c62bc00f1-kube-api-access-ch567\") pod \"heat-f7f7-account-create-update-7xqmf\" (UID: \"eb422d31-3977-4c63-87f8-a90c62bc00f1\") " pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.372532 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.391219 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:15 crc kubenswrapper[4940]: W1126 08:56:15.888341 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfb71163_cff0_41a1_8400_cf97b444d624.slice/crio-f0b83a5da183ee18c05cd437219c4285f42cd4488dfd8223f980cbf6e0f30825 WatchSource:0}: Error finding container f0b83a5da183ee18c05cd437219c4285f42cd4488dfd8223f980cbf6e0f30825: Status 404 returned error can't find the container with id f0b83a5da183ee18c05cd437219c4285f42cd4488dfd8223f980cbf6e0f30825 Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.889577 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-8jw7v"] Nov 26 08:56:15 crc kubenswrapper[4940]: I1126 08:56:15.992905 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-f7f7-account-create-update-7xqmf"] Nov 26 08:56:16 crc kubenswrapper[4940]: I1126 08:56:16.247390 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-f7f7-account-create-update-7xqmf" event={"ID":"eb422d31-3977-4c63-87f8-a90c62bc00f1","Type":"ContainerStarted","Data":"63f74c87f4b6d16250ec889fbcd83573a9c0c8500b0b298cf0f88595e95651ab"} Nov 26 08:56:16 crc kubenswrapper[4940]: I1126 08:56:16.247698 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-f7f7-account-create-update-7xqmf" event={"ID":"eb422d31-3977-4c63-87f8-a90c62bc00f1","Type":"ContainerStarted","Data":"c9914cac005329a007adcc3834bc137a6bba418b4d66ce2c328b289b6e7bfb68"} Nov 26 08:56:16 crc kubenswrapper[4940]: I1126 08:56:16.251606 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-8jw7v" event={"ID":"bfb71163-cff0-41a1-8400-cf97b444d624","Type":"ContainerStarted","Data":"607a2df3f21c060272ac0cd8b5e1d06102e684b7d6e337f38c9a24df1730b09a"} Nov 26 08:56:16 crc kubenswrapper[4940]: I1126 08:56:16.251660 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-8jw7v" event={"ID":"bfb71163-cff0-41a1-8400-cf97b444d624","Type":"ContainerStarted","Data":"f0b83a5da183ee18c05cd437219c4285f42cd4488dfd8223f980cbf6e0f30825"} Nov 26 08:56:16 crc kubenswrapper[4940]: I1126 08:56:16.268835 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-f7f7-account-create-update-7xqmf" podStartSLOduration=1.26880805 podStartE2EDuration="1.26880805s" podCreationTimestamp="2025-11-26 08:56:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:56:16.260272386 +0000 UTC m=+7277.780414005" watchObservedRunningTime="2025-11-26 08:56:16.26880805 +0000 UTC m=+7277.788949669" Nov 26 08:56:16 crc kubenswrapper[4940]: I1126 08:56:16.287983 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-8jw7v" podStartSLOduration=1.287961126 podStartE2EDuration="1.287961126s" podCreationTimestamp="2025-11-26 08:56:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:56:16.280439904 +0000 UTC m=+7277.800581523" watchObservedRunningTime="2025-11-26 08:56:16.287961126 +0000 UTC m=+7277.808102745" Nov 26 08:56:17 crc kubenswrapper[4940]: I1126 08:56:17.281356 4940 generic.go:334] "Generic (PLEG): container finished" podID="bfb71163-cff0-41a1-8400-cf97b444d624" containerID="607a2df3f21c060272ac0cd8b5e1d06102e684b7d6e337f38c9a24df1730b09a" exitCode=0 Nov 26 08:56:17 crc kubenswrapper[4940]: I1126 08:56:17.282805 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-8jw7v" event={"ID":"bfb71163-cff0-41a1-8400-cf97b444d624","Type":"ContainerDied","Data":"607a2df3f21c060272ac0cd8b5e1d06102e684b7d6e337f38c9a24df1730b09a"} Nov 26 08:56:17 crc kubenswrapper[4940]: I1126 08:56:17.286921 4940 generic.go:334] "Generic (PLEG): container finished" podID="eb422d31-3977-4c63-87f8-a90c62bc00f1" containerID="63f74c87f4b6d16250ec889fbcd83573a9c0c8500b0b298cf0f88595e95651ab" exitCode=0 Nov 26 08:56:17 crc kubenswrapper[4940]: I1126 08:56:17.287005 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-f7f7-account-create-update-7xqmf" event={"ID":"eb422d31-3977-4c63-87f8-a90c62bc00f1","Type":"ContainerDied","Data":"63f74c87f4b6d16250ec889fbcd83573a9c0c8500b0b298cf0f88595e95651ab"} Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.766793 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.771433 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.921027 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2vnm\" (UniqueName: \"kubernetes.io/projected/bfb71163-cff0-41a1-8400-cf97b444d624-kube-api-access-n2vnm\") pod \"bfb71163-cff0-41a1-8400-cf97b444d624\" (UID: \"bfb71163-cff0-41a1-8400-cf97b444d624\") " Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.921133 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bfb71163-cff0-41a1-8400-cf97b444d624-operator-scripts\") pod \"bfb71163-cff0-41a1-8400-cf97b444d624\" (UID: \"bfb71163-cff0-41a1-8400-cf97b444d624\") " Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.921211 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb422d31-3977-4c63-87f8-a90c62bc00f1-operator-scripts\") pod \"eb422d31-3977-4c63-87f8-a90c62bc00f1\" (UID: \"eb422d31-3977-4c63-87f8-a90c62bc00f1\") " Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.921272 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ch567\" (UniqueName: \"kubernetes.io/projected/eb422d31-3977-4c63-87f8-a90c62bc00f1-kube-api-access-ch567\") pod \"eb422d31-3977-4c63-87f8-a90c62bc00f1\" (UID: \"eb422d31-3977-4c63-87f8-a90c62bc00f1\") " Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.922692 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfb71163-cff0-41a1-8400-cf97b444d624-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bfb71163-cff0-41a1-8400-cf97b444d624" (UID: "bfb71163-cff0-41a1-8400-cf97b444d624"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.922711 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb422d31-3977-4c63-87f8-a90c62bc00f1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eb422d31-3977-4c63-87f8-a90c62bc00f1" (UID: "eb422d31-3977-4c63-87f8-a90c62bc00f1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.929028 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb422d31-3977-4c63-87f8-a90c62bc00f1-kube-api-access-ch567" (OuterVolumeSpecName: "kube-api-access-ch567") pod "eb422d31-3977-4c63-87f8-a90c62bc00f1" (UID: "eb422d31-3977-4c63-87f8-a90c62bc00f1"). InnerVolumeSpecName "kube-api-access-ch567". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:56:18 crc kubenswrapper[4940]: I1126 08:56:18.941707 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfb71163-cff0-41a1-8400-cf97b444d624-kube-api-access-n2vnm" (OuterVolumeSpecName: "kube-api-access-n2vnm") pod "bfb71163-cff0-41a1-8400-cf97b444d624" (UID: "bfb71163-cff0-41a1-8400-cf97b444d624"). InnerVolumeSpecName "kube-api-access-n2vnm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.023550 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bfb71163-cff0-41a1-8400-cf97b444d624-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.023593 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb422d31-3977-4c63-87f8-a90c62bc00f1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.023608 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ch567\" (UniqueName: \"kubernetes.io/projected/eb422d31-3977-4c63-87f8-a90c62bc00f1-kube-api-access-ch567\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.023621 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2vnm\" (UniqueName: \"kubernetes.io/projected/bfb71163-cff0-41a1-8400-cf97b444d624-kube-api-access-n2vnm\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.309888 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-8jw7v" event={"ID":"bfb71163-cff0-41a1-8400-cf97b444d624","Type":"ContainerDied","Data":"f0b83a5da183ee18c05cd437219c4285f42cd4488dfd8223f980cbf6e0f30825"} Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.309934 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-8jw7v" Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.309953 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0b83a5da183ee18c05cd437219c4285f42cd4488dfd8223f980cbf6e0f30825" Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.311929 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-f7f7-account-create-update-7xqmf" event={"ID":"eb422d31-3977-4c63-87f8-a90c62bc00f1","Type":"ContainerDied","Data":"c9914cac005329a007adcc3834bc137a6bba418b4d66ce2c328b289b6e7bfb68"} Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.312332 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9914cac005329a007adcc3834bc137a6bba418b4d66ce2c328b289b6e7bfb68" Nov 26 08:56:19 crc kubenswrapper[4940]: I1126 08:56:19.311996 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-f7f7-account-create-update-7xqmf" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.297757 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-kzlhr"] Nov 26 08:56:20 crc kubenswrapper[4940]: E1126 08:56:20.298211 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfb71163-cff0-41a1-8400-cf97b444d624" containerName="mariadb-database-create" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.298225 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfb71163-cff0-41a1-8400-cf97b444d624" containerName="mariadb-database-create" Nov 26 08:56:20 crc kubenswrapper[4940]: E1126 08:56:20.298252 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb422d31-3977-4c63-87f8-a90c62bc00f1" containerName="mariadb-account-create-update" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.298260 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb422d31-3977-4c63-87f8-a90c62bc00f1" containerName="mariadb-account-create-update" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.298514 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb422d31-3977-4c63-87f8-a90c62bc00f1" containerName="mariadb-account-create-update" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.298537 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfb71163-cff0-41a1-8400-cf97b444d624" containerName="mariadb-database-create" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.299718 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.303400 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-2clj2" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.303876 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.320871 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-kzlhr"] Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.449726 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-config-data\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.449859 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-combined-ca-bundle\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.449913 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98wmm\" (UniqueName: \"kubernetes.io/projected/62b1b100-b34d-47bd-9f08-f6061f6d61c6-kube-api-access-98wmm\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.552210 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-combined-ca-bundle\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.552270 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98wmm\" (UniqueName: \"kubernetes.io/projected/62b1b100-b34d-47bd-9f08-f6061f6d61c6-kube-api-access-98wmm\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.552392 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-config-data\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.557420 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-combined-ca-bundle\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.557650 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-config-data\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.581165 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98wmm\" (UniqueName: \"kubernetes.io/projected/62b1b100-b34d-47bd-9f08-f6061f6d61c6-kube-api-access-98wmm\") pod \"heat-db-sync-kzlhr\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:20 crc kubenswrapper[4940]: I1126 08:56:20.631692 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:21 crc kubenswrapper[4940]: W1126 08:56:21.105212 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62b1b100_b34d_47bd_9f08_f6061f6d61c6.slice/crio-d6a1df7dd2dd0d8aa8b16dcca9f01b28040e108a8d1b4fb852dd69b98285b1a8 WatchSource:0}: Error finding container d6a1df7dd2dd0d8aa8b16dcca9f01b28040e108a8d1b4fb852dd69b98285b1a8: Status 404 returned error can't find the container with id d6a1df7dd2dd0d8aa8b16dcca9f01b28040e108a8d1b4fb852dd69b98285b1a8 Nov 26 08:56:21 crc kubenswrapper[4940]: I1126 08:56:21.106158 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-kzlhr"] Nov 26 08:56:21 crc kubenswrapper[4940]: I1126 08:56:21.335510 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-kzlhr" event={"ID":"62b1b100-b34d-47bd-9f08-f6061f6d61c6","Type":"ContainerStarted","Data":"d6a1df7dd2dd0d8aa8b16dcca9f01b28040e108a8d1b4fb852dd69b98285b1a8"} Nov 26 08:56:23 crc kubenswrapper[4940]: I1126 08:56:23.978290 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:23 crc kubenswrapper[4940]: I1126 08:56:23.978641 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:30 crc kubenswrapper[4940]: I1126 08:56:30.420658 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-kzlhr" event={"ID":"62b1b100-b34d-47bd-9f08-f6061f6d61c6","Type":"ContainerStarted","Data":"6a7cf35a9878ac612d6692229c60f0dbfa4f3a49269bc26ecea27ed1c854a951"} Nov 26 08:56:32 crc kubenswrapper[4940]: I1126 08:56:32.446695 4940 generic.go:334] "Generic (PLEG): container finished" podID="62b1b100-b34d-47bd-9f08-f6061f6d61c6" containerID="6a7cf35a9878ac612d6692229c60f0dbfa4f3a49269bc26ecea27ed1c854a951" exitCode=0 Nov 26 08:56:32 crc kubenswrapper[4940]: I1126 08:56:32.446830 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-kzlhr" event={"ID":"62b1b100-b34d-47bd-9f08-f6061f6d61c6","Type":"ContainerDied","Data":"6a7cf35a9878ac612d6692229c60f0dbfa4f3a49269bc26ecea27ed1c854a951"} Nov 26 08:56:33 crc kubenswrapper[4940]: I1126 08:56:33.894220 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.059471 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-combined-ca-bundle\") pod \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.059540 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-98wmm\" (UniqueName: \"kubernetes.io/projected/62b1b100-b34d-47bd-9f08-f6061f6d61c6-kube-api-access-98wmm\") pod \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.059659 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-config-data\") pod \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\" (UID: \"62b1b100-b34d-47bd-9f08-f6061f6d61c6\") " Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.065253 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62b1b100-b34d-47bd-9f08-f6061f6d61c6-kube-api-access-98wmm" (OuterVolumeSpecName: "kube-api-access-98wmm") pod "62b1b100-b34d-47bd-9f08-f6061f6d61c6" (UID: "62b1b100-b34d-47bd-9f08-f6061f6d61c6"). InnerVolumeSpecName "kube-api-access-98wmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.094752 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62b1b100-b34d-47bd-9f08-f6061f6d61c6" (UID: "62b1b100-b34d-47bd-9f08-f6061f6d61c6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.125096 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-config-data" (OuterVolumeSpecName: "config-data") pod "62b1b100-b34d-47bd-9f08-f6061f6d61c6" (UID: "62b1b100-b34d-47bd-9f08-f6061f6d61c6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.161388 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.161424 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-98wmm\" (UniqueName: \"kubernetes.io/projected/62b1b100-b34d-47bd-9f08-f6061f6d61c6-kube-api-access-98wmm\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.161435 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62b1b100-b34d-47bd-9f08-f6061f6d61c6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.487869 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-kzlhr" event={"ID":"62b1b100-b34d-47bd-9f08-f6061f6d61c6","Type":"ContainerDied","Data":"d6a1df7dd2dd0d8aa8b16dcca9f01b28040e108a8d1b4fb852dd69b98285b1a8"} Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.487967 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-kzlhr" Nov 26 08:56:34 crc kubenswrapper[4940]: I1126 08:56:34.487973 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d6a1df7dd2dd0d8aa8b16dcca9f01b28040e108a8d1b4fb852dd69b98285b1a8" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.693097 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-754b7cd586-zs8p6"] Nov 26 08:56:35 crc kubenswrapper[4940]: E1126 08:56:35.693597 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62b1b100-b34d-47bd-9f08-f6061f6d61c6" containerName="heat-db-sync" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.693831 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="62b1b100-b34d-47bd-9f08-f6061f6d61c6" containerName="heat-db-sync" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.694124 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="62b1b100-b34d-47bd-9f08-f6061f6d61c6" containerName="heat-db-sync" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.707442 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.719989 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.720221 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-2clj2" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.737098 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.740409 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-754b7cd586-zs8p6"] Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.777353 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-6ddd77f754-z9z4k"] Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.804256 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.806705 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.809073 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgpwc\" (UniqueName: \"kubernetes.io/projected/7e312342-1d38-478d-9ddd-dda028582760-kube-api-access-xgpwc\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.809190 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-config-data\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.809213 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-config-data-custom\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.809231 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-combined-ca-bundle\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.846565 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6ddd77f754-z9z4k"] Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.863492 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-56cc65db64-2vj5l"] Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.867096 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.874269 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.888118 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-56cc65db64-2vj5l"] Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.910503 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-config-data\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.910657 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgpwc\" (UniqueName: \"kubernetes.io/projected/7e312342-1d38-478d-9ddd-dda028582760-kube-api-access-xgpwc\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.910697 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkzdn\" (UniqueName: \"kubernetes.io/projected/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-kube-api-access-pkzdn\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.910781 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-config-data\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.910807 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-config-data-custom\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.910833 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-config-data-custom\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.910855 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-combined-ca-bundle\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.910906 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-combined-ca-bundle\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " 
pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.923377 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-combined-ca-bundle\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.925443 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgpwc\" (UniqueName: \"kubernetes.io/projected/7e312342-1d38-478d-9ddd-dda028582760-kube-api-access-xgpwc\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.925446 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-config-data\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:35 crc kubenswrapper[4940]: I1126 08:56:35.926702 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7e312342-1d38-478d-9ddd-dda028582760-config-data-custom\") pod \"heat-engine-754b7cd586-zs8p6\" (UID: \"7e312342-1d38-478d-9ddd-dda028582760\") " pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.012084 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-combined-ca-bundle\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.012149 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rdsq\" (UniqueName: \"kubernetes.io/projected/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-kube-api-access-8rdsq\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.012181 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-config-data-custom\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.012234 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-combined-ca-bundle\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.012486 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-config-data-custom\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: 
\"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.012522 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-config-data\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.012620 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-config-data\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.012648 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkzdn\" (UniqueName: \"kubernetes.io/projected/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-kube-api-access-pkzdn\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.018426 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-combined-ca-bundle\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.018721 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-config-data-custom\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.022863 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-config-data\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.031445 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkzdn\" (UniqueName: \"kubernetes.io/projected/ed206b9d-81f1-49bf-9dc1-d17d76ec052a-kube-api-access-pkzdn\") pod \"heat-api-6ddd77f754-z9z4k\" (UID: \"ed206b9d-81f1-49bf-9dc1-d17d76ec052a\") " pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.066527 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.113816 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-config-data\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.114394 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-combined-ca-bundle\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.114432 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rdsq\" (UniqueName: \"kubernetes.io/projected/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-kube-api-access-8rdsq\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.114483 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-config-data-custom\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.121409 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-config-data\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.122012 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-config-data-custom\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.125750 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-combined-ca-bundle\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.136174 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rdsq\" (UniqueName: \"kubernetes.io/projected/edb8e38b-3795-4451-9d4d-60f8ccd5bffc-kube-api-access-8rdsq\") pod \"heat-cfnapi-56cc65db64-2vj5l\" (UID: \"edb8e38b-3795-4451-9d4d-60f8ccd5bffc\") " pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.140316 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.192506 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.615541 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-754b7cd586-zs8p6"] Nov 26 08:56:36 crc kubenswrapper[4940]: W1126 08:56:36.622845 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e312342_1d38_478d_9ddd_dda028582760.slice/crio-9bb6616528969866d22b3556c87251afe4857782d823c028c696022877bfe65f WatchSource:0}: Error finding container 9bb6616528969866d22b3556c87251afe4857782d823c028c696022877bfe65f: Status 404 returned error can't find the container with id 9bb6616528969866d22b3556c87251afe4857782d823c028c696022877bfe65f Nov 26 08:56:36 crc kubenswrapper[4940]: W1126 08:56:36.741411 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded206b9d_81f1_49bf_9dc1_d17d76ec052a.slice/crio-0d550b76fa00eea1561a1716819a29d373a23305f44799d9a28feba347b0d046 WatchSource:0}: Error finding container 0d550b76fa00eea1561a1716819a29d373a23305f44799d9a28feba347b0d046: Status 404 returned error can't find the container with id 0d550b76fa00eea1561a1716819a29d373a23305f44799d9a28feba347b0d046 Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.744873 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6ddd77f754-z9z4k"] Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.792860 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:36 crc kubenswrapper[4940]: I1126 08:56:36.817609 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-56cc65db64-2vj5l"] Nov 26 08:56:36 crc kubenswrapper[4940]: W1126 08:56:36.821304 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedb8e38b_3795_4451_9d4d_60f8ccd5bffc.slice/crio-588651100bd2b53259d739e3de70066d98d8eb10b7206983d1da3a9584b63f6c WatchSource:0}: Error finding container 588651100bd2b53259d739e3de70066d98d8eb10b7206983d1da3a9584b63f6c: Status 404 returned error can't find the container with id 588651100bd2b53259d739e3de70066d98d8eb10b7206983d1da3a9584b63f6c Nov 26 08:56:37 crc kubenswrapper[4940]: I1126 08:56:37.526167 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6ddd77f754-z9z4k" event={"ID":"ed206b9d-81f1-49bf-9dc1-d17d76ec052a","Type":"ContainerStarted","Data":"0d550b76fa00eea1561a1716819a29d373a23305f44799d9a28feba347b0d046"} Nov 26 08:56:37 crc kubenswrapper[4940]: I1126 08:56:37.528069 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-754b7cd586-zs8p6" event={"ID":"7e312342-1d38-478d-9ddd-dda028582760","Type":"ContainerStarted","Data":"9b63205f364623b21745ecd438787a17a6ba0efcf30cbbe9f52ba9be4b424d61"} Nov 26 08:56:37 crc kubenswrapper[4940]: I1126 08:56:37.528115 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-754b7cd586-zs8p6" event={"ID":"7e312342-1d38-478d-9ddd-dda028582760","Type":"ContainerStarted","Data":"9bb6616528969866d22b3556c87251afe4857782d823c028c696022877bfe65f"} Nov 26 08:56:37 crc kubenswrapper[4940]: I1126 08:56:37.528142 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:37 crc kubenswrapper[4940]: I1126 08:56:37.531258 4940 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-56cc65db64-2vj5l" event={"ID":"edb8e38b-3795-4451-9d4d-60f8ccd5bffc","Type":"ContainerStarted","Data":"588651100bd2b53259d739e3de70066d98d8eb10b7206983d1da3a9584b63f6c"} Nov 26 08:56:37 crc kubenswrapper[4940]: I1126 08:56:37.560530 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-754b7cd586-zs8p6" podStartSLOduration=2.560514805 podStartE2EDuration="2.560514805s" podCreationTimestamp="2025-11-26 08:56:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:56:37.56036138 +0000 UTC m=+7299.080502999" watchObservedRunningTime="2025-11-26 08:56:37.560514805 +0000 UTC m=+7299.080656424" Nov 26 08:56:38 crc kubenswrapper[4940]: I1126 08:56:38.560542 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6575b86469-nmbzf" Nov 26 08:56:38 crc kubenswrapper[4940]: I1126 08:56:38.629395 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-98d9954b9-rg54m"] Nov 26 08:56:38 crc kubenswrapper[4940]: I1126 08:56:38.629617 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-98d9954b9-rg54m" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon-log" containerID="cri-o://c1176880b242fbf7ef76c3c0ec0f46f8f63237c361defc3801232ee37dc8f07f" gracePeriod=30 Nov 26 08:56:38 crc kubenswrapper[4940]: I1126 08:56:38.630085 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-98d9954b9-rg54m" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon" containerID="cri-o://d7a351c5bebb5c14a7dea54674691acc71e2bf2e2297cb58c110286f3cc09eb5" gracePeriod=30 Nov 26 08:56:39 crc kubenswrapper[4940]: I1126 08:56:39.556308 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6ddd77f754-z9z4k" event={"ID":"ed206b9d-81f1-49bf-9dc1-d17d76ec052a","Type":"ContainerStarted","Data":"c36d4bdca9ae81eeb6ad6b5910d6b8a8ee3b2cec879ee3225ff03c9c92e1b9dd"} Nov 26 08:56:39 crc kubenswrapper[4940]: I1126 08:56:39.556652 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:39 crc kubenswrapper[4940]: I1126 08:56:39.558851 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-56cc65db64-2vj5l" event={"ID":"edb8e38b-3795-4451-9d4d-60f8ccd5bffc","Type":"ContainerStarted","Data":"9e832b99aec33ef0f11d10f61ba94abf80cd5db762b803acb333e602a3671bb8"} Nov 26 08:56:39 crc kubenswrapper[4940]: I1126 08:56:39.559750 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:39 crc kubenswrapper[4940]: I1126 08:56:39.604514 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-6ddd77f754-z9z4k" podStartSLOduration=3.011878322 podStartE2EDuration="4.604490517s" podCreationTimestamp="2025-11-26 08:56:35 +0000 UTC" firstStartedPulling="2025-11-26 08:56:36.746742901 +0000 UTC m=+7298.266884520" lastFinishedPulling="2025-11-26 08:56:38.339355096 +0000 UTC m=+7299.859496715" observedRunningTime="2025-11-26 08:56:39.582569623 +0000 UTC m=+7301.102711242" watchObservedRunningTime="2025-11-26 08:56:39.604490517 +0000 UTC m=+7301.124632136" Nov 26 08:56:39 crc kubenswrapper[4940]: I1126 08:56:39.612725 4940 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-56cc65db64-2vj5l" podStartSLOduration=3.096046677 podStartE2EDuration="4.612700211s" podCreationTimestamp="2025-11-26 08:56:35 +0000 UTC" firstStartedPulling="2025-11-26 08:56:36.824298513 +0000 UTC m=+7298.344440132" lastFinishedPulling="2025-11-26 08:56:38.340952047 +0000 UTC m=+7299.861093666" observedRunningTime="2025-11-26 08:56:39.602875646 +0000 UTC m=+7301.123017265" watchObservedRunningTime="2025-11-26 08:56:39.612700211 +0000 UTC m=+7301.132841830" Nov 26 08:56:42 crc kubenswrapper[4940]: I1126 08:56:42.596726 4940 generic.go:334] "Generic (PLEG): container finished" podID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerID="d7a351c5bebb5c14a7dea54674691acc71e2bf2e2297cb58c110286f3cc09eb5" exitCode=0 Nov 26 08:56:42 crc kubenswrapper[4940]: I1126 08:56:42.596782 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-98d9954b9-rg54m" event={"ID":"7127829b-b2bf-48fe-866d-397b4628bcd6","Type":"ContainerDied","Data":"d7a351c5bebb5c14a7dea54674691acc71e2bf2e2297cb58c110286f3cc09eb5"} Nov 26 08:56:44 crc kubenswrapper[4940]: I1126 08:56:44.051309 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-8rk6t"] Nov 26 08:56:44 crc kubenswrapper[4940]: I1126 08:56:44.060650 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-b0c5-account-create-update-z7rdr"] Nov 26 08:56:44 crc kubenswrapper[4940]: I1126 08:56:44.072534 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-8rk6t"] Nov 26 08:56:44 crc kubenswrapper[4940]: I1126 08:56:44.083159 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-b0c5-account-create-update-z7rdr"] Nov 26 08:56:45 crc kubenswrapper[4940]: I1126 08:56:45.183808 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29328f7e-482f-4011-80b6-146c887cdc3c" path="/var/lib/kubelet/pods/29328f7e-482f-4011-80b6-146c887cdc3c/volumes" Nov 26 08:56:45 crc kubenswrapper[4940]: I1126 08:56:45.185133 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d065c95a-4ae1-499c-ba83-64a4b317c524" path="/var/lib/kubelet/pods/d065c95a-4ae1-499c-ba83-64a4b317c524/volumes" Nov 26 08:56:46 crc kubenswrapper[4940]: I1126 08:56:46.097361 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-754b7cd586-zs8p6" Nov 26 08:56:47 crc kubenswrapper[4940]: I1126 08:56:47.633306 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-6ddd77f754-z9z4k" Nov 26 08:56:47 crc kubenswrapper[4940]: I1126 08:56:47.706048 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-56cc65db64-2vj5l" Nov 26 08:56:47 crc kubenswrapper[4940]: I1126 08:56:47.749421 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-98d9954b9-rg54m" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.137:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.137:8080: connect: connection refused" Nov 26 08:56:51 crc kubenswrapper[4940]: I1126 08:56:51.727880 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 26 08:56:51 crc kubenswrapper[4940]: I1126 08:56:51.728409 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:56:55 crc kubenswrapper[4940]: I1126 08:56:55.031452 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-54ngd"] Nov 26 08:56:55 crc kubenswrapper[4940]: I1126 08:56:55.043272 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-54ngd"] Nov 26 08:56:55 crc kubenswrapper[4940]: I1126 08:56:55.182985 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0460ba04-08aa-4afe-9125-f1eb105161a3" path="/var/lib/kubelet/pods/0460ba04-08aa-4afe-9125-f1eb105161a3/volumes" Nov 26 08:56:56 crc kubenswrapper[4940]: I1126 08:56:56.816103 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf"] Nov 26 08:56:56 crc kubenswrapper[4940]: I1126 08:56:56.818723 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:56 crc kubenswrapper[4940]: I1126 08:56:56.821271 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 08:56:56 crc kubenswrapper[4940]: I1126 08:56:56.835135 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf"] Nov 26 08:56:56 crc kubenswrapper[4940]: I1126 08:56:56.926633 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:56 crc kubenswrapper[4940]: I1126 08:56:56.927030 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:56 crc kubenswrapper[4940]: I1126 08:56:56.927102 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqcz5\" (UniqueName: \"kubernetes.io/projected/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-kube-api-access-jqcz5\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.028741 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-util\") pod 
\"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.028808 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqcz5\" (UniqueName: \"kubernetes.io/projected/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-kube-api-access-jqcz5\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.028909 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.029490 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.030424 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.054225 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqcz5\" (UniqueName: \"kubernetes.io/projected/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-kube-api-access-jqcz5\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.144497 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.613932 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf"] Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.745953 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" event={"ID":"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af","Type":"ContainerStarted","Data":"61473e8ab14c385d658adb9624a158b0ceb2864904411390109c0e3374071ae8"} Nov 26 08:56:57 crc kubenswrapper[4940]: I1126 08:56:57.749544 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-98d9954b9-rg54m" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.137:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.137:8080: connect: connection refused" Nov 26 08:56:58 crc kubenswrapper[4940]: I1126 08:56:58.760540 4940 generic.go:334] "Generic (PLEG): container finished" podID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerID="95f9baa074f694f7bc7acf5725544bb8b3c89dfcf27b9a6c13c3bcd8fbb8f644" exitCode=0 Nov 26 08:56:58 crc kubenswrapper[4940]: I1126 08:56:58.760608 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" event={"ID":"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af","Type":"ContainerDied","Data":"95f9baa074f694f7bc7acf5725544bb8b3c89dfcf27b9a6c13c3bcd8fbb8f644"} Nov 26 08:57:00 crc kubenswrapper[4940]: I1126 08:57:00.780802 4940 generic.go:334] "Generic (PLEG): container finished" podID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerID="95b243a56a416c6e60d6650f3446e723e33c50832b449cd8706b0453662967b5" exitCode=0 Nov 26 08:57:00 crc kubenswrapper[4940]: I1126 08:57:00.780866 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" event={"ID":"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af","Type":"ContainerDied","Data":"95b243a56a416c6e60d6650f3446e723e33c50832b449cd8706b0453662967b5"} Nov 26 08:57:01 crc kubenswrapper[4940]: I1126 08:57:01.793970 4940 generic.go:334] "Generic (PLEG): container finished" podID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerID="5a9501657710b65f307887a09d858d3ae93f92d236c835d9827ac09fe384eee4" exitCode=0 Nov 26 08:57:01 crc kubenswrapper[4940]: I1126 08:57:01.794010 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" event={"ID":"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af","Type":"ContainerDied","Data":"5a9501657710b65f307887a09d858d3ae93f92d236c835d9827ac09fe384eee4"} Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.316693 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.362867 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-bundle\") pod \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.362994 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-util\") pod \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.363086 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqcz5\" (UniqueName: \"kubernetes.io/projected/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-kube-api-access-jqcz5\") pod \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\" (UID: \"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af\") " Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.365726 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-bundle" (OuterVolumeSpecName: "bundle") pod "fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" (UID: "fea8b3a9-1dfd-4026-bd0f-c8940c00d8af"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.369150 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-kube-api-access-jqcz5" (OuterVolumeSpecName: "kube-api-access-jqcz5") pod "fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" (UID: "fea8b3a9-1dfd-4026-bd0f-c8940c00d8af"). InnerVolumeSpecName "kube-api-access-jqcz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.370597 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-util" (OuterVolumeSpecName: "util") pod "fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" (UID: "fea8b3a9-1dfd-4026-bd0f-c8940c00d8af"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.466477 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqcz5\" (UniqueName: \"kubernetes.io/projected/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-kube-api-access-jqcz5\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.466745 4940 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.466833 4940 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fea8b3a9-1dfd-4026-bd0f-c8940c00d8af-util\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.825260 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" event={"ID":"fea8b3a9-1dfd-4026-bd0f-c8940c00d8af","Type":"ContainerDied","Data":"61473e8ab14c385d658adb9624a158b0ceb2864904411390109c0e3374071ae8"} Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.825334 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61473e8ab14c385d658adb9624a158b0ceb2864904411390109c0e3374071ae8" Nov 26 08:57:03 crc kubenswrapper[4940]: I1126 08:57:03.825354 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf" Nov 26 08:57:07 crc kubenswrapper[4940]: I1126 08:57:07.749691 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-98d9954b9-rg54m" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.137:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.137:8080: connect: connection refused" Nov 26 08:57:07 crc kubenswrapper[4940]: I1126 08:57:07.750366 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:57:08 crc kubenswrapper[4940]: I1126 08:57:08.988519 4940 generic.go:334] "Generic (PLEG): container finished" podID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerID="c1176880b242fbf7ef76c3c0ec0f46f8f63237c361defc3801232ee37dc8f07f" exitCode=137 Nov 26 08:57:08 crc kubenswrapper[4940]: I1126 08:57:08.988685 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-98d9954b9-rg54m" event={"ID":"7127829b-b2bf-48fe-866d-397b4628bcd6","Type":"ContainerDied","Data":"c1176880b242fbf7ef76c3c0ec0f46f8f63237c361defc3801232ee37dc8f07f"} Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.807382 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.918597 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8c7z\" (UniqueName: \"kubernetes.io/projected/7127829b-b2bf-48fe-866d-397b4628bcd6-kube-api-access-s8c7z\") pod \"7127829b-b2bf-48fe-866d-397b4628bcd6\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.918656 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-config-data\") pod \"7127829b-b2bf-48fe-866d-397b4628bcd6\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.918691 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7127829b-b2bf-48fe-866d-397b4628bcd6-logs\") pod \"7127829b-b2bf-48fe-866d-397b4628bcd6\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.918771 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-scripts\") pod \"7127829b-b2bf-48fe-866d-397b4628bcd6\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.918837 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7127829b-b2bf-48fe-866d-397b4628bcd6-horizon-secret-key\") pod \"7127829b-b2bf-48fe-866d-397b4628bcd6\" (UID: \"7127829b-b2bf-48fe-866d-397b4628bcd6\") " Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.919348 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7127829b-b2bf-48fe-866d-397b4628bcd6-logs" (OuterVolumeSpecName: "logs") pod "7127829b-b2bf-48fe-866d-397b4628bcd6" (UID: "7127829b-b2bf-48fe-866d-397b4628bcd6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.919839 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7127829b-b2bf-48fe-866d-397b4628bcd6-logs\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.929531 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7127829b-b2bf-48fe-866d-397b4628bcd6-kube-api-access-s8c7z" (OuterVolumeSpecName: "kube-api-access-s8c7z") pod "7127829b-b2bf-48fe-866d-397b4628bcd6" (UID: "7127829b-b2bf-48fe-866d-397b4628bcd6"). InnerVolumeSpecName "kube-api-access-s8c7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.963198 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7127829b-b2bf-48fe-866d-397b4628bcd6-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "7127829b-b2bf-48fe-866d-397b4628bcd6" (UID: "7127829b-b2bf-48fe-866d-397b4628bcd6"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:57:09 crc kubenswrapper[4940]: I1126 08:57:09.986110 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-scripts" (OuterVolumeSpecName: "scripts") pod "7127829b-b2bf-48fe-866d-397b4628bcd6" (UID: "7127829b-b2bf-48fe-866d-397b4628bcd6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.004642 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-config-data" (OuterVolumeSpecName: "config-data") pod "7127829b-b2bf-48fe-866d-397b4628bcd6" (UID: "7127829b-b2bf-48fe-866d-397b4628bcd6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.027233 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.027271 4940 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7127829b-b2bf-48fe-866d-397b4628bcd6-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.027290 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8c7z\" (UniqueName: \"kubernetes.io/projected/7127829b-b2bf-48fe-866d-397b4628bcd6-kube-api-access-s8c7z\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.027300 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7127829b-b2bf-48fe-866d-397b4628bcd6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.032233 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-98d9954b9-rg54m" event={"ID":"7127829b-b2bf-48fe-866d-397b4628bcd6","Type":"ContainerDied","Data":"afab43604b722a710a0d915a27926e6e18d3481e2e4e254726e1d9ace47b81c9"} Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.032317 4940 scope.go:117] "RemoveContainer" containerID="d7a351c5bebb5c14a7dea54674691acc71e2bf2e2297cb58c110286f3cc09eb5" Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.032492 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-98d9954b9-rg54m" Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.151583 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-98d9954b9-rg54m"] Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.170709 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-98d9954b9-rg54m"] Nov 26 08:57:10 crc kubenswrapper[4940]: I1126 08:57:10.289481 4940 scope.go:117] "RemoveContainer" containerID="c1176880b242fbf7ef76c3c0ec0f46f8f63237c361defc3801232ee37dc8f07f" Nov 26 08:57:11 crc kubenswrapper[4940]: I1126 08:57:11.175635 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" path="/var/lib/kubelet/pods/7127829b-b2bf-48fe-866d-397b4628bcd6/volumes" Nov 26 08:57:12 crc kubenswrapper[4940]: I1126 08:57:12.992758 4940 scope.go:117] "RemoveContainer" containerID="45545a54eb13926dd3f34a502f30d4936a3be621f40e7caa586835bf657ee125" Nov 26 08:57:13 crc kubenswrapper[4940]: I1126 08:57:13.074014 4940 scope.go:117] "RemoveContainer" containerID="99e0a0c536159ce4f1407628eab4e22fda6bca1599755a9aeeae19b51556db79" Nov 26 08:57:13 crc kubenswrapper[4940]: I1126 08:57:13.102918 4940 scope.go:117] "RemoveContainer" containerID="3816559f4bd9844a4e2d1aee689aa7ccd9927eed247747ab2df268679da453b3" Nov 26 08:57:13 crc kubenswrapper[4940]: I1126 08:57:13.131900 4940 scope.go:117] "RemoveContainer" containerID="bf0fb024cdc94dd3d4c72f7b2e81cfb94d3d5c94f0ba82ff8dc58168c5d31bac" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.043330 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7"] Nov 26 08:57:16 crc kubenswrapper[4940]: E1126 08:57:16.044254 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon-log" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.044269 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon-log" Nov 26 08:57:16 crc kubenswrapper[4940]: E1126 08:57:16.044298 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerName="util" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.044304 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerName="util" Nov 26 08:57:16 crc kubenswrapper[4940]: E1126 08:57:16.044314 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerName="pull" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.044319 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerName="pull" Nov 26 08:57:16 crc kubenswrapper[4940]: E1126 08:57:16.044331 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerName="extract" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.044337 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerName="extract" Nov 26 08:57:16 crc kubenswrapper[4940]: E1126 08:57:16.044350 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.044357 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.044549 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fea8b3a9-1dfd-4026-bd0f-c8940c00d8af" containerName="extract" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.044567 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon-log" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.044584 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="7127829b-b2bf-48fe-866d-397b4628bcd6" containerName="horizon" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.045465 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.048880 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.049322 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.049557 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-frgdz" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.067164 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.142035 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6727\" (UniqueName: \"kubernetes.io/projected/9d602305-12d4-40ac-a76d-4cb2dc13381f-kube-api-access-j6727\") pod \"obo-prometheus-operator-668cf9dfbb-6dwh7\" (UID: \"9d602305-12d4-40ac-a76d-4cb2dc13381f\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.230116 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.238050 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.246096 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-x95hf" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.247211 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6727\" (UniqueName: \"kubernetes.io/projected/9d602305-12d4-40ac-a76d-4cb2dc13381f-kube-api-access-j6727\") pod \"obo-prometheus-operator-668cf9dfbb-6dwh7\" (UID: \"9d602305-12d4-40ac-a76d-4cb2dc13381f\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.249024 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.259900 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.271789 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.273139 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.278366 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6727\" (UniqueName: \"kubernetes.io/projected/9d602305-12d4-40ac-a76d-4cb2dc13381f-kube-api-access-j6727\") pod \"obo-prometheus-operator-668cf9dfbb-6dwh7\" (UID: \"9d602305-12d4-40ac-a76d-4cb2dc13381f\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.281447 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.349143 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dff01c39-1a77-43f8-a784-9ec5dff28a90-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-r4z7j\" (UID: \"dff01c39-1a77-43f8-a784-9ec5dff28a90\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.349218 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/760eb996-e2fd-47b9-a005-280453befb3f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-b5b64\" (UID: \"760eb996-e2fd-47b9-a005-280453befb3f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.349443 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dff01c39-1a77-43f8-a784-9ec5dff28a90-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-r4z7j\" (UID: 
\"dff01c39-1a77-43f8-a784-9ec5dff28a90\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.349526 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/760eb996-e2fd-47b9-a005-280453befb3f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-b5b64\" (UID: \"760eb996-e2fd-47b9-a005-280453befb3f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.361721 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-tqmf7"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.363455 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.363801 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.365853 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.366169 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-28ssp" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.375923 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-tqmf7"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.451308 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dff01c39-1a77-43f8-a784-9ec5dff28a90-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-r4z7j\" (UID: \"dff01c39-1a77-43f8-a784-9ec5dff28a90\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.451653 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/7a938eea-838c-424d-9d89-5b5978dfcf79-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-tqmf7\" (UID: \"7a938eea-838c-424d-9d89-5b5978dfcf79\") " pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.451687 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/760eb996-e2fd-47b9-a005-280453befb3f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-b5b64\" (UID: \"760eb996-e2fd-47b9-a005-280453befb3f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.451791 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dff01c39-1a77-43f8-a784-9ec5dff28a90-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-r4z7j\" (UID: \"dff01c39-1a77-43f8-a784-9ec5dff28a90\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.451840 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/760eb996-e2fd-47b9-a005-280453befb3f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-b5b64\" (UID: \"760eb996-e2fd-47b9-a005-280453befb3f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.451943 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdzt6\" (UniqueName: \"kubernetes.io/projected/7a938eea-838c-424d-9d89-5b5978dfcf79-kube-api-access-hdzt6\") pod \"observability-operator-d8bb48f5d-tqmf7\" (UID: \"7a938eea-838c-424d-9d89-5b5978dfcf79\") " pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.457502 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dff01c39-1a77-43f8-a784-9ec5dff28a90-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-r4z7j\" (UID: \"dff01c39-1a77-43f8-a784-9ec5dff28a90\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.460389 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/760eb996-e2fd-47b9-a005-280453befb3f-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-b5b64\" (UID: \"760eb996-e2fd-47b9-a005-280453befb3f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.460720 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dff01c39-1a77-43f8-a784-9ec5dff28a90-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-r4z7j\" (UID: \"dff01c39-1a77-43f8-a784-9ec5dff28a90\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.462618 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/760eb996-e2fd-47b9-a005-280453befb3f-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6d66954595-b5b64\" (UID: \"760eb996-e2fd-47b9-a005-280453befb3f\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.484662 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qvkhc"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.486026 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.488268 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-sh8g4" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.508424 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qvkhc"] Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.554310 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/6bd0a4fb-63e0-4e35-b671-f4648b20cf4e-openshift-service-ca\") pod \"perses-operator-5446b9c989-qvkhc\" (UID: \"6bd0a4fb-63e0-4e35-b671-f4648b20cf4e\") " pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.554351 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z92l7\" (UniqueName: \"kubernetes.io/projected/6bd0a4fb-63e0-4e35-b671-f4648b20cf4e-kube-api-access-z92l7\") pod \"perses-operator-5446b9c989-qvkhc\" (UID: \"6bd0a4fb-63e0-4e35-b671-f4648b20cf4e\") " pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.554683 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdzt6\" (UniqueName: \"kubernetes.io/projected/7a938eea-838c-424d-9d89-5b5978dfcf79-kube-api-access-hdzt6\") pod \"observability-operator-d8bb48f5d-tqmf7\" (UID: \"7a938eea-838c-424d-9d89-5b5978dfcf79\") " pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.554803 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/7a938eea-838c-424d-9d89-5b5978dfcf79-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-tqmf7\" (UID: \"7a938eea-838c-424d-9d89-5b5978dfcf79\") " pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.559166 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/7a938eea-838c-424d-9d89-5b5978dfcf79-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-tqmf7\" (UID: \"7a938eea-838c-424d-9d89-5b5978dfcf79\") " pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.561311 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.576239 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdzt6\" (UniqueName: \"kubernetes.io/projected/7a938eea-838c-424d-9d89-5b5978dfcf79-kube-api-access-hdzt6\") pod \"observability-operator-d8bb48f5d-tqmf7\" (UID: \"7a938eea-838c-424d-9d89-5b5978dfcf79\") " pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.638583 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.658257 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/6bd0a4fb-63e0-4e35-b671-f4648b20cf4e-openshift-service-ca\") pod \"perses-operator-5446b9c989-qvkhc\" (UID: \"6bd0a4fb-63e0-4e35-b671-f4648b20cf4e\") " pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.658306 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z92l7\" (UniqueName: \"kubernetes.io/projected/6bd0a4fb-63e0-4e35-b671-f4648b20cf4e-kube-api-access-z92l7\") pod \"perses-operator-5446b9c989-qvkhc\" (UID: \"6bd0a4fb-63e0-4e35-b671-f4648b20cf4e\") " pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.659394 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/6bd0a4fb-63e0-4e35-b671-f4648b20cf4e-openshift-service-ca\") pod \"perses-operator-5446b9c989-qvkhc\" (UID: \"6bd0a4fb-63e0-4e35-b671-f4648b20cf4e\") " pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.690631 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.694195 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z92l7\" (UniqueName: \"kubernetes.io/projected/6bd0a4fb-63e0-4e35-b671-f4648b20cf4e-kube-api-access-z92l7\") pod \"perses-operator-5446b9c989-qvkhc\" (UID: \"6bd0a4fb-63e0-4e35-b671-f4648b20cf4e\") " pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.861975 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:16 crc kubenswrapper[4940]: I1126 08:57:16.968494 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7"] Nov 26 08:57:16 crc kubenswrapper[4940]: W1126 08:57:16.987991 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d602305_12d4_40ac_a76d_4cb2dc13381f.slice/crio-bb452973bd2a57a78a6d992efd564d21f3b04774dd215ee641ad692e13e507f7 WatchSource:0}: Error finding container bb452973bd2a57a78a6d992efd564d21f3b04774dd215ee641ad692e13e507f7: Status 404 returned error can't find the container with id bb452973bd2a57a78a6d992efd564d21f3b04774dd215ee641ad692e13e507f7 Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.103090 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-47c1-account-create-update-xqm5m"] Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.130523 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7" event={"ID":"9d602305-12d4-40ac-a76d-4cb2dc13381f","Type":"ContainerStarted","Data":"bb452973bd2a57a78a6d992efd564d21f3b04774dd215ee641ad692e13e507f7"} Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.144969 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-n9lc2"] Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.153065 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-47c1-account-create-update-xqm5m"] Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.160636 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-n9lc2"] Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.176019 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a9e3e01-69f3-4d3c-8586-2f70ca4083d1" path="/var/lib/kubelet/pods/7a9e3e01-69f3-4d3c-8586-2f70ca4083d1/volumes" Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.176761 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe133454-afe8-4208-ba1a-86f87d1a0837" path="/var/lib/kubelet/pods/fe133454-afe8-4208-ba1a-86f87d1a0837/volumes" Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.458689 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64"] Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.598662 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j"] Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.603343 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-tqmf7"] Nov 26 08:57:17 crc kubenswrapper[4940]: I1126 08:57:17.755975 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qvkhc"] Nov 26 08:57:17 crc kubenswrapper[4940]: W1126 08:57:17.761829 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6bd0a4fb_63e0_4e35_b671_f4648b20cf4e.slice/crio-cc9856277e88daa3fa53e901fe61f9ca686b98c72218497f46d22348115e6900 WatchSource:0}: Error finding container cc9856277e88daa3fa53e901fe61f9ca686b98c72218497f46d22348115e6900: Status 404 returned error can't find the container 
with id cc9856277e88daa3fa53e901fe61f9ca686b98c72218497f46d22348115e6900 Nov 26 08:57:18 crc kubenswrapper[4940]: I1126 08:57:18.179602 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" event={"ID":"760eb996-e2fd-47b9-a005-280453befb3f","Type":"ContainerStarted","Data":"cc53d893b2d35d72dfa6ec003fd15c3349087e7a63befbeb96dff02f0512a9b2"} Nov 26 08:57:18 crc kubenswrapper[4940]: I1126 08:57:18.186396 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-qvkhc" event={"ID":"6bd0a4fb-63e0-4e35-b671-f4648b20cf4e","Type":"ContainerStarted","Data":"cc9856277e88daa3fa53e901fe61f9ca686b98c72218497f46d22348115e6900"} Nov 26 08:57:18 crc kubenswrapper[4940]: I1126 08:57:18.197094 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" event={"ID":"7a938eea-838c-424d-9d89-5b5978dfcf79","Type":"ContainerStarted","Data":"44bdd8cf8e61abe955c9721b2ce7a5af7f50674f4e5adae980394adfc8a77a22"} Nov 26 08:57:18 crc kubenswrapper[4940]: I1126 08:57:18.200699 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" event={"ID":"dff01c39-1a77-43f8-a784-9ec5dff28a90","Type":"ContainerStarted","Data":"c7af0e0274e8ec604f9f1d82bf0021b2994d7f6ec02d6621cad3bf879610e3da"} Nov 26 08:57:21 crc kubenswrapper[4940]: I1126 08:57:21.728361 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:57:21 crc kubenswrapper[4940]: I1126 08:57:21.728823 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:57:26 crc kubenswrapper[4940]: I1126 08:57:26.037072 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-z78qf"] Nov 26 08:57:26 crc kubenswrapper[4940]: I1126 08:57:26.045509 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-z78qf"] Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.177179 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3d251fc-2e47-4381-bb73-05ff1a5753b2" path="/var/lib/kubelet/pods/a3d251fc-2e47-4381-bb73-05ff1a5753b2/volumes" Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.344713 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" event={"ID":"7a938eea-838c-424d-9d89-5b5978dfcf79","Type":"ContainerStarted","Data":"9e1c711ccabbbbb5bb7d3590c30888d57bae99926812e9789d0953a974fcf70e"} Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.345619 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.347719 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" 
event={"ID":"dff01c39-1a77-43f8-a784-9ec5dff28a90","Type":"ContainerStarted","Data":"47dffdc49e9e826ce79d77cf9b9cd18ce1e12eef045ad3c94bc1dce57dd1229b"} Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.349388 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.352096 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" event={"ID":"760eb996-e2fd-47b9-a005-280453befb3f","Type":"ContainerStarted","Data":"32e415144a34f4a621fa47006361f46a8423c9b8ae266baaf1c84ac0213e0430"} Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.354148 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-qvkhc" event={"ID":"6bd0a4fb-63e0-4e35-b671-f4648b20cf4e","Type":"ContainerStarted","Data":"5f9ac99c6d06e657346cdff463c5e770f384b5cddbb45dee5bc9425a3149c70b"} Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.355062 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.357318 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7" event={"ID":"9d602305-12d4-40ac-a76d-4cb2dc13381f","Type":"ContainerStarted","Data":"dd955b37251a70e20460c4e3b656c281bb130873ee6ac3ae51f5a31c4a656764"} Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.378922 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-tqmf7" podStartSLOduration=2.700663779 podStartE2EDuration="11.378901863s" podCreationTimestamp="2025-11-26 08:57:16 +0000 UTC" firstStartedPulling="2025-11-26 08:57:17.613456636 +0000 UTC m=+7339.133598255" lastFinishedPulling="2025-11-26 08:57:26.29169472 +0000 UTC m=+7347.811836339" observedRunningTime="2025-11-26 08:57:27.37508299 +0000 UTC m=+7348.895224609" watchObservedRunningTime="2025-11-26 08:57:27.378901863 +0000 UTC m=+7348.899043472" Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.403866 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-b5b64" podStartSLOduration=2.77414003 podStartE2EDuration="11.403844254s" podCreationTimestamp="2025-11-26 08:57:16 +0000 UTC" firstStartedPulling="2025-11-26 08:57:17.494886236 +0000 UTC m=+7339.015027855" lastFinishedPulling="2025-11-26 08:57:26.12459045 +0000 UTC m=+7347.644732079" observedRunningTime="2025-11-26 08:57:27.398973688 +0000 UTC m=+7348.919115307" watchObservedRunningTime="2025-11-26 08:57:27.403844254 +0000 UTC m=+7348.923985873" Nov 26 08:57:27 crc kubenswrapper[4940]: I1126 08:57:27.461412 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-qvkhc" podStartSLOduration=2.980783501 podStartE2EDuration="11.461385854s" podCreationTimestamp="2025-11-26 08:57:16 +0000 UTC" firstStartedPulling="2025-11-26 08:57:17.764091807 +0000 UTC m=+7339.284233426" lastFinishedPulling="2025-11-26 08:57:26.24469416 +0000 UTC m=+7347.764835779" observedRunningTime="2025-11-26 08:57:27.42453793 +0000 UTC m=+7348.944679549" watchObservedRunningTime="2025-11-26 08:57:27.461385854 +0000 UTC m=+7348.981527473" Nov 26 08:57:27 crc 
kubenswrapper[4940]: I1126 08:57:27.519591 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6d66954595-r4z7j" podStartSLOduration=2.996823737 podStartE2EDuration="11.519568154s" podCreationTimestamp="2025-11-26 08:57:16 +0000 UTC" firstStartedPulling="2025-11-26 08:57:17.603450905 +0000 UTC m=+7339.123592524" lastFinishedPulling="2025-11-26 08:57:26.126195312 +0000 UTC m=+7347.646336941" observedRunningTime="2025-11-26 08:57:27.478240766 +0000 UTC m=+7348.998382395" watchObservedRunningTime="2025-11-26 08:57:27.519568154 +0000 UTC m=+7349.039709773" Nov 26 08:57:36 crc kubenswrapper[4940]: I1126 08:57:36.866237 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-qvkhc" Nov 26 08:57:36 crc kubenswrapper[4940]: I1126 08:57:36.886803 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-6dwh7" podStartSLOduration=11.752956685000001 podStartE2EDuration="20.886787122s" podCreationTimestamp="2025-11-26 08:57:16 +0000 UTC" firstStartedPulling="2025-11-26 08:57:16.99191048 +0000 UTC m=+7338.512052099" lastFinishedPulling="2025-11-26 08:57:26.125740907 +0000 UTC m=+7347.645882536" observedRunningTime="2025-11-26 08:57:27.519672027 +0000 UTC m=+7349.039813646" watchObservedRunningTime="2025-11-26 08:57:36.886787122 +0000 UTC m=+7358.406928741" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.721964 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.722764 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="687afc0a-79a8-492c-a626-44e95c547d23" containerName="openstackclient" containerID="cri-o://742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1" gracePeriod=2 Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.738163 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.762115 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 08:57:39 crc kubenswrapper[4940]: E1126 08:57:39.762609 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="687afc0a-79a8-492c-a626-44e95c547d23" containerName="openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.762633 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="687afc0a-79a8-492c-a626-44e95c547d23" containerName="openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.762913 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="687afc0a-79a8-492c-a626-44e95c547d23" containerName="openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.763777 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.769256 4940 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="687afc0a-79a8-492c-a626-44e95c547d23" podUID="70e630f5-14ad-4165-b133-30973d9125d8" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.780988 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.841437 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/70e630f5-14ad-4165-b133-30973d9125d8-openstack-config\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.841599 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvfh2\" (UniqueName: \"kubernetes.io/projected/70e630f5-14ad-4165-b133-30973d9125d8-kube-api-access-bvfh2\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.843513 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/70e630f5-14ad-4165-b133-30973d9125d8-openstack-config-secret\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.948768 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/70e630f5-14ad-4165-b133-30973d9125d8-openstack-config\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.949217 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvfh2\" (UniqueName: \"kubernetes.io/projected/70e630f5-14ad-4165-b133-30973d9125d8-kube-api-access-bvfh2\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.949316 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/70e630f5-14ad-4165-b133-30973d9125d8-openstack-config-secret\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.949625 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/70e630f5-14ad-4165-b133-30973d9125d8-openstack-config\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:39 crc kubenswrapper[4940]: I1126 08:57:39.965935 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/70e630f5-14ad-4165-b133-30973d9125d8-openstack-config-secret\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:39 crc 
kubenswrapper[4940]: I1126 08:57:39.976747 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvfh2\" (UniqueName: \"kubernetes.io/projected/70e630f5-14ad-4165-b133-30973d9125d8-kube-api-access-bvfh2\") pod \"openstackclient\" (UID: \"70e630f5-14ad-4165-b133-30973d9125d8\") " pod="openstack/openstackclient" Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.034819 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.036531 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.052460 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.063168 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-gsk4n" Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.147691 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.153652 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmccj\" (UniqueName: \"kubernetes.io/projected/5171ec99-793d-4fc4-80cd-8e90577c4618-kube-api-access-wmccj\") pod \"kube-state-metrics-0\" (UID: \"5171ec99-793d-4fc4-80cd-8e90577c4618\") " pod="openstack/kube-state-metrics-0" Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.255056 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmccj\" (UniqueName: \"kubernetes.io/projected/5171ec99-793d-4fc4-80cd-8e90577c4618-kube-api-access-wmccj\") pod \"kube-state-metrics-0\" (UID: \"5171ec99-793d-4fc4-80cd-8e90577c4618\") " pod="openstack/kube-state-metrics-0" Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.284062 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmccj\" (UniqueName: \"kubernetes.io/projected/5171ec99-793d-4fc4-80cd-8e90577c4618-kube-api-access-wmccj\") pod \"kube-state-metrics-0\" (UID: \"5171ec99-793d-4fc4-80cd-8e90577c4618\") " pod="openstack/kube-state-metrics-0" Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.404186 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 08:57:40 crc kubenswrapper[4940]: I1126 08:57:40.937083 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.120969 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.515088 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5171ec99-793d-4fc4-80cd-8e90577c4618","Type":"ContainerStarted","Data":"e06b4f3535835b6fb4e9f57240f857ba32c08ba65b40cf75591378c5b78bbfe9"} Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.517006 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"70e630f5-14ad-4165-b133-30973d9125d8","Type":"ContainerStarted","Data":"2510bfd8e677529b947e681d091be812893f2f966e1f907fe6bce0c4d8ff408e"} Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.517567 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"70e630f5-14ad-4165-b133-30973d9125d8","Type":"ContainerStarted","Data":"f7838bda3c57bd7883b745be01ecd78819923454efa3dfc334df6dbc58fef17d"} Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.573808 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.57378923 podStartE2EDuration="2.57378923s" podCreationTimestamp="2025-11-26 08:57:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:57:41.564730029 +0000 UTC m=+7363.084871648" watchObservedRunningTime="2025-11-26 08:57:41.57378923 +0000 UTC m=+7363.093930849" Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.966787 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.969206 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.979360 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.979558 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.979662 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.983545 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-r2nz8" Nov 26 08:57:41 crc kubenswrapper[4940]: I1126 08:57:41.986740 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.009936 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.129551 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/80d71139-436a-4bad-b084-12b7d2e037c9-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.129814 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.129865 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.129912 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llhd5\" (UniqueName: \"kubernetes.io/projected/80d71139-436a-4bad-b084-12b7d2e037c9-kube-api-access-llhd5\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.129942 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/80d71139-436a-4bad-b084-12b7d2e037c9-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.129955 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/80d71139-436a-4bad-b084-12b7d2e037c9-tls-assets\") pod \"alertmanager-metric-storage-0\" 
(UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.129990 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.237253 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.237344 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llhd5\" (UniqueName: \"kubernetes.io/projected/80d71139-436a-4bad-b084-12b7d2e037c9-kube-api-access-llhd5\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.237389 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/80d71139-436a-4bad-b084-12b7d2e037c9-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.237410 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/80d71139-436a-4bad-b084-12b7d2e037c9-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.237456 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.237550 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/80d71139-436a-4bad-b084-12b7d2e037c9-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.237584 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.255915 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-web-config\") pod \"alertmanager-metric-storage-0\" (UID: 
\"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.256294 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/80d71139-436a-4bad-b084-12b7d2e037c9-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.280857 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/80d71139-436a-4bad-b084-12b7d2e037c9-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.282478 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/80d71139-436a-4bad-b084-12b7d2e037c9-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.283619 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.300643 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/80d71139-436a-4bad-b084-12b7d2e037c9-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.307564 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llhd5\" (UniqueName: \"kubernetes.io/projected/80d71139-436a-4bad-b084-12b7d2e037c9-kube-api-access-llhd5\") pod \"alertmanager-metric-storage-0\" (UID: \"80d71139-436a-4bad-b084-12b7d2e037c9\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.373774 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.429809 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.555511 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5fhm\" (UniqueName: \"kubernetes.io/projected/687afc0a-79a8-492c-a626-44e95c547d23-kube-api-access-m5fhm\") pod \"687afc0a-79a8-492c-a626-44e95c547d23\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.555644 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config\") pod \"687afc0a-79a8-492c-a626-44e95c547d23\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.555770 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config-secret\") pod \"687afc0a-79a8-492c-a626-44e95c547d23\" (UID: \"687afc0a-79a8-492c-a626-44e95c547d23\") " Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.570232 4940 generic.go:334] "Generic (PLEG): container finished" podID="687afc0a-79a8-492c-a626-44e95c547d23" containerID="742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1" exitCode=137 Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.570631 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.571140 4940 scope.go:117] "RemoveContainer" containerID="742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.595276 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/687afc0a-79a8-492c-a626-44e95c547d23-kube-api-access-m5fhm" (OuterVolumeSpecName: "kube-api-access-m5fhm") pod "687afc0a-79a8-492c-a626-44e95c547d23" (UID: "687afc0a-79a8-492c-a626-44e95c547d23"). InnerVolumeSpecName "kube-api-access-m5fhm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.601499 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5171ec99-793d-4fc4-80cd-8e90577c4618","Type":"ContainerStarted","Data":"4bb7b63c18a53a843d7ae563f382f48aa4a942030b207e3a5a4648bd66943ab3"} Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.601557 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.614963 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.617756 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.620967 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.621523 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.621810 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.623866 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.631471 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.649818 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-z5kw9" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.653114 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.661221 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5fhm\" (UniqueName: \"kubernetes.io/projected/687afc0a-79a8-492c-a626-44e95c547d23-kube-api-access-m5fhm\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.683339 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "687afc0a-79a8-492c-a626-44e95c547d23" (UID: "687afc0a-79a8-492c-a626-44e95c547d23"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.683934 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "687afc0a-79a8-492c-a626-44e95c547d23" (UID: "687afc0a-79a8-492c-a626-44e95c547d23"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.685315 4940 scope.go:117] "RemoveContainer" containerID="742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.691847 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.286706482 podStartE2EDuration="3.691821162s" podCreationTimestamp="2025-11-26 08:57:39 +0000 UTC" firstStartedPulling="2025-11-26 08:57:41.144200933 +0000 UTC m=+7362.664342542" lastFinishedPulling="2025-11-26 08:57:41.549315603 +0000 UTC m=+7363.069457222" observedRunningTime="2025-11-26 08:57:42.647078434 +0000 UTC m=+7364.167220063" watchObservedRunningTime="2025-11-26 08:57:42.691821162 +0000 UTC m=+7364.211962781" Nov 26 08:57:42 crc kubenswrapper[4940]: E1126 08:57:42.699313 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1\": container with ID starting with 742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1 not found: ID does not exist" containerID="742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.699353 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1"} err="failed to get container status \"742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1\": rpc error: code = NotFound desc = could not find container \"742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1\": container with ID starting with 742260ef1d3a075ed2356b0be790fccea4c15f795cfb0c2551dc8bbf7af709a1 not found: ID does not exist" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.763353 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3b3736b9-b106-4b89-a513-d5e5440ce386-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.763487 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3b3736b9-b106-4b89-a513-d5e5440ce386-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.763529 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.763584 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3b3736b9-b106-4b89-a513-d5e5440ce386-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: 
\"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.763601 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4q9q\" (UniqueName: \"kubernetes.io/projected/3b3736b9-b106-4b89-a513-d5e5440ce386-kube-api-access-s4q9q\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.763789 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.763932 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-config\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.763972 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a515050b-278c-46fe-b246-6fc28c36340d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a515050b-278c-46fe-b246-6fc28c36340d\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.764118 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.764130 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/687afc0a-79a8-492c-a626-44e95c547d23-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.867848 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3b3736b9-b106-4b89-a513-d5e5440ce386-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.867898 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.867955 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3b3736b9-b106-4b89-a513-d5e5440ce386-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.867973 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4q9q\" (UniqueName: \"kubernetes.io/projected/3b3736b9-b106-4b89-a513-d5e5440ce386-kube-api-access-s4q9q\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.868019 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.868066 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-config\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.868098 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a515050b-278c-46fe-b246-6fc28c36340d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a515050b-278c-46fe-b246-6fc28c36340d\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.868132 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3b3736b9-b106-4b89-a513-d5e5440ce386-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.868734 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3b3736b9-b106-4b89-a513-d5e5440ce386-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.871667 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.872153 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.872273 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3b3736b9-b106-4b89-a513-d5e5440ce386-config\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 
08:57:42.874564 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3b3736b9-b106-4b89-a513-d5e5440ce386-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.874886 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3b3736b9-b106-4b89-a513-d5e5440ce386-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.881858 4940 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.881908 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a515050b-278c-46fe-b246-6fc28c36340d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a515050b-278c-46fe-b246-6fc28c36340d\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/18f518296f88fea12928338dbcda7f2ddf779a8c1598536dba61d3735a859524/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.893935 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4q9q\" (UniqueName: \"kubernetes.io/projected/3b3736b9-b106-4b89-a513-d5e5440ce386-kube-api-access-s4q9q\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.914454 4940 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="687afc0a-79a8-492c-a626-44e95c547d23" podUID="70e630f5-14ad-4165-b133-30973d9125d8" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.936368 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a515050b-278c-46fe-b246-6fc28c36340d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a515050b-278c-46fe-b246-6fc28c36340d\") pod \"prometheus-metric-storage-0\" (UID: \"3b3736b9-b106-4b89-a513-d5e5440ce386\") " pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:42 crc kubenswrapper[4940]: I1126 08:57:42.983328 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 08:57:43 crc kubenswrapper[4940]: I1126 08:57:43.177455 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="687afc0a-79a8-492c-a626-44e95c547d23" path="/var/lib/kubelet/pods/687afc0a-79a8-492c-a626-44e95c547d23/volumes" Nov 26 08:57:43 crc kubenswrapper[4940]: I1126 08:57:43.224592 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 26 08:57:43 crc kubenswrapper[4940]: W1126 08:57:43.244550 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80d71139_436a_4bad_b084_12b7d2e037c9.slice/crio-62deb7094eabec79af8fec4d7c5d2746de13f64ecca566d8372298f5822c83e1 WatchSource:0}: Error finding container 62deb7094eabec79af8fec4d7c5d2746de13f64ecca566d8372298f5822c83e1: Status 404 returned error can't find the container with id 62deb7094eabec79af8fec4d7c5d2746de13f64ecca566d8372298f5822c83e1 Nov 26 08:57:43 crc kubenswrapper[4940]: I1126 08:57:43.525826 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 08:57:43 crc kubenswrapper[4940]: I1126 08:57:43.610212 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"80d71139-436a-4bad-b084-12b7d2e037c9","Type":"ContainerStarted","Data":"62deb7094eabec79af8fec4d7c5d2746de13f64ecca566d8372298f5822c83e1"} Nov 26 08:57:43 crc kubenswrapper[4940]: I1126 08:57:43.613742 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3b3736b9-b106-4b89-a513-d5e5440ce386","Type":"ContainerStarted","Data":"375cbce78ff9a7d5156caba8b992f7f911afc7e62142fb1f4c9a29493a36f845"} Nov 26 08:57:49 crc kubenswrapper[4940]: I1126 08:57:49.695394 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"80d71139-436a-4bad-b084-12b7d2e037c9","Type":"ContainerStarted","Data":"3714a50bca5bd16707d74f979a0ea775cf966503331b3abaad34e05a0458f0aa"} Nov 26 08:57:49 crc kubenswrapper[4940]: I1126 08:57:49.699349 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3b3736b9-b106-4b89-a513-d5e5440ce386","Type":"ContainerStarted","Data":"4f788212fdb4525cb31a505fc87d760feb01599f3849eab892e2348f96c56210"} Nov 26 08:57:50 crc kubenswrapper[4940]: I1126 08:57:50.409549 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 08:57:51 crc kubenswrapper[4940]: I1126 08:57:51.728823 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:57:51 crc kubenswrapper[4940]: I1126 08:57:51.729327 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:57:51 crc kubenswrapper[4940]: I1126 08:57:51.729391 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 08:57:51 crc kubenswrapper[4940]: I1126 08:57:51.730498 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:57:51 crc kubenswrapper[4940]: I1126 08:57:51.730604 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" gracePeriod=600 Nov 26 08:57:51 crc kubenswrapper[4940]: E1126 08:57:51.856405 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:57:52 crc kubenswrapper[4940]: I1126 08:57:52.736423 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" exitCode=0 Nov 26 08:57:52 crc kubenswrapper[4940]: I1126 08:57:52.736524 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f"} Nov 26 08:57:52 crc kubenswrapper[4940]: I1126 08:57:52.736850 4940 scope.go:117] "RemoveContainer" containerID="4097e33d00555a418131c81b7ef769c20a467eee44ca0e7c613ac70f0162336d" Nov 26 08:57:52 crc kubenswrapper[4940]: I1126 08:57:52.738123 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:57:52 crc kubenswrapper[4940]: E1126 08:57:52.738572 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:57:56 crc kubenswrapper[4940]: I1126 08:57:56.792141 4940 generic.go:334] "Generic (PLEG): container finished" podID="80d71139-436a-4bad-b084-12b7d2e037c9" containerID="3714a50bca5bd16707d74f979a0ea775cf966503331b3abaad34e05a0458f0aa" exitCode=0 Nov 26 08:57:56 crc kubenswrapper[4940]: I1126 08:57:56.792241 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"80d71139-436a-4bad-b084-12b7d2e037c9","Type":"ContainerDied","Data":"3714a50bca5bd16707d74f979a0ea775cf966503331b3abaad34e05a0458f0aa"} Nov 26 08:57:57 crc kubenswrapper[4940]: I1126 08:57:57.807712 4940 generic.go:334] "Generic (PLEG): container finished" 
podID="3b3736b9-b106-4b89-a513-d5e5440ce386" containerID="4f788212fdb4525cb31a505fc87d760feb01599f3849eab892e2348f96c56210" exitCode=0 Nov 26 08:57:57 crc kubenswrapper[4940]: I1126 08:57:57.807945 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3b3736b9-b106-4b89-a513-d5e5440ce386","Type":"ContainerDied","Data":"4f788212fdb4525cb31a505fc87d760feb01599f3849eab892e2348f96c56210"} Nov 26 08:57:59 crc kubenswrapper[4940]: I1126 08:57:59.833001 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"80d71139-436a-4bad-b084-12b7d2e037c9","Type":"ContainerStarted","Data":"c9795bb3c3dbcbddda7d78519e2227fa44a008cca1517812808f45f6706aa92a"} Nov 26 08:58:02 crc kubenswrapper[4940]: I1126 08:58:02.873845 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"80d71139-436a-4bad-b084-12b7d2e037c9","Type":"ContainerStarted","Data":"9838c52e3ff274c49f1419519cc919d77a001f9042fe5d73b37cd305baef7ac1"} Nov 26 08:58:02 crc kubenswrapper[4940]: I1126 08:58:02.874473 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Nov 26 08:58:02 crc kubenswrapper[4940]: I1126 08:58:02.878548 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Nov 26 08:58:02 crc kubenswrapper[4940]: I1126 08:58:02.906542 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=6.557107141 podStartE2EDuration="21.906524553s" podCreationTimestamp="2025-11-26 08:57:41 +0000 UTC" firstStartedPulling="2025-11-26 08:57:43.249508516 +0000 UTC m=+7364.769650135" lastFinishedPulling="2025-11-26 08:57:58.598925928 +0000 UTC m=+7380.119067547" observedRunningTime="2025-11-26 08:58:02.897031548 +0000 UTC m=+7384.417173167" watchObservedRunningTime="2025-11-26 08:58:02.906524553 +0000 UTC m=+7384.426666162" Nov 26 08:58:04 crc kubenswrapper[4940]: I1126 08:58:04.166752 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:58:04 crc kubenswrapper[4940]: E1126 08:58:04.167626 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:58:04 crc kubenswrapper[4940]: I1126 08:58:04.895584 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3b3736b9-b106-4b89-a513-d5e5440ce386","Type":"ContainerStarted","Data":"3858826622d7957b33fc7c1937495ec7b129acef7b76b017881b5e11929da2c2"} Nov 26 08:58:09 crc kubenswrapper[4940]: I1126 08:58:09.048137 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-zqh8w"] Nov 26 08:58:09 crc kubenswrapper[4940]: I1126 08:58:09.055339 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-aca1-account-create-update-p795q"] Nov 26 08:58:09 crc kubenswrapper[4940]: I1126 08:58:09.070360 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/glance-aca1-account-create-update-p795q"] Nov 26 08:58:09 crc kubenswrapper[4940]: I1126 08:58:09.081263 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-zqh8w"] Nov 26 08:58:09 crc kubenswrapper[4940]: I1126 08:58:09.176250 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="612f623d-62d5-4722-b290-7d2ab5cb4795" path="/var/lib/kubelet/pods/612f623d-62d5-4722-b290-7d2ab5cb4795/volumes" Nov 26 08:58:09 crc kubenswrapper[4940]: I1126 08:58:09.176961 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7f2a933-405d-4a9b-892f-d2fff3a10bf6" path="/var/lib/kubelet/pods/a7f2a933-405d-4a9b-892f-d2fff3a10bf6/volumes" Nov 26 08:58:09 crc kubenswrapper[4940]: I1126 08:58:09.978286 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3b3736b9-b106-4b89-a513-d5e5440ce386","Type":"ContainerStarted","Data":"d23164d9baf94aedc2320a0cce2becb141226ee983a6f5898df8037cb7d28f20"} Nov 26 08:58:13 crc kubenswrapper[4940]: I1126 08:58:13.013182 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3b3736b9-b106-4b89-a513-d5e5440ce386","Type":"ContainerStarted","Data":"8182c640cefe6d460732c304878c3a585bffe2bed185e6f2f77ddce985d1b01e"} Nov 26 08:58:13 crc kubenswrapper[4940]: I1126 08:58:13.038725 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.775654447 podStartE2EDuration="32.038697867s" podCreationTimestamp="2025-11-26 08:57:41 +0000 UTC" firstStartedPulling="2025-11-26 08:57:43.536452009 +0000 UTC m=+7365.056593628" lastFinishedPulling="2025-11-26 08:58:11.799495429 +0000 UTC m=+7393.319637048" observedRunningTime="2025-11-26 08:58:13.03694308 +0000 UTC m=+7394.557084709" watchObservedRunningTime="2025-11-26 08:58:13.038697867 +0000 UTC m=+7394.558839506" Nov 26 08:58:13 crc kubenswrapper[4940]: I1126 08:58:13.346669 4940 scope.go:117] "RemoveContainer" containerID="4918a2616cf200fcc9466f791913a5a17664b73fbdc23a0e59115e97ae89ebcf" Nov 26 08:58:13 crc kubenswrapper[4940]: I1126 08:58:13.378100 4940 scope.go:117] "RemoveContainer" containerID="969d72976e6ab72bf75e90b8ea97f84d0d5cb46ed5eb5d3a5b241bd66f66cc3a" Nov 26 08:58:13 crc kubenswrapper[4940]: I1126 08:58:13.440741 4940 scope.go:117] "RemoveContainer" containerID="5aa31e432606e2c15765302595b39ea553f09f834afdabc97d25da38d7694bec" Nov 26 08:58:13 crc kubenswrapper[4940]: I1126 08:58:13.481559 4940 scope.go:117] "RemoveContainer" containerID="a9650a5084fadb53159d17654cf524e8da140a51c8978bdb9a87f8f6a2d6339f" Nov 26 08:58:13 crc kubenswrapper[4940]: I1126 08:58:13.535457 4940 scope.go:117] "RemoveContainer" containerID="ee2ea4281e2b1df972f0ed2a2f2be7f68fca3ad07bd5a1706816358e021f7412" Nov 26 08:58:16 crc kubenswrapper[4940]: I1126 08:58:16.166650 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:58:16 crc kubenswrapper[4940]: E1126 08:58:16.167668 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:58:17 
crc kubenswrapper[4940]: I1126 08:58:17.983715 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.217698 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.220654 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.222624 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-config-data\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.222802 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.222839 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-log-httpd\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.222883 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-scripts\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.224021 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.224096 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scxbx\" (UniqueName: \"kubernetes.io/projected/eef83081-e196-476b-9f7e-f39cfd51d781-kube-api-access-scxbx\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.224190 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-run-httpd\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.224194 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.224577 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.231739 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ceilometer-0"] Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.325816 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-run-httpd\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.325938 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-config-data\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.326129 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.326166 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-log-httpd\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.326214 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-scripts\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.326333 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.326417 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scxbx\" (UniqueName: \"kubernetes.io/projected/eef83081-e196-476b-9f7e-f39cfd51d781-kube-api-access-scxbx\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.327442 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-run-httpd\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.328110 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-log-httpd\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.332622 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-scripts\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc 
kubenswrapper[4940]: I1126 08:58:19.332999 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-config-data\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.333829 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.342893 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.346425 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scxbx\" (UniqueName: \"kubernetes.io/projected/eef83081-e196-476b-9f7e-f39cfd51d781-kube-api-access-scxbx\") pod \"ceilometer-0\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " pod="openstack/ceilometer-0" Nov 26 08:58:19 crc kubenswrapper[4940]: I1126 08:58:19.547760 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:58:20 crc kubenswrapper[4940]: I1126 08:58:20.055150 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:58:20 crc kubenswrapper[4940]: I1126 08:58:20.104366 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerStarted","Data":"3cfdcabef67f2f92641b152f64d44f043fd04487e42ba9ddbe6a33673abdda2a"} Nov 26 08:58:24 crc kubenswrapper[4940]: I1126 08:58:24.031758 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:58:24 crc kubenswrapper[4940]: I1126 08:58:24.141470 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerStarted","Data":"6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25"} Nov 26 08:58:26 crc kubenswrapper[4940]: I1126 08:58:26.168503 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerStarted","Data":"d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481"} Nov 26 08:58:26 crc kubenswrapper[4940]: I1126 08:58:26.169111 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerStarted","Data":"571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e"} Nov 26 08:58:27 crc kubenswrapper[4940]: I1126 08:58:27.984440 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 26 08:58:27 crc kubenswrapper[4940]: I1126 08:58:27.987310 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 26 08:58:28 crc kubenswrapper[4940]: I1126 08:58:28.167078 4940 scope.go:117] "RemoveContainer" 
containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:58:28 crc kubenswrapper[4940]: E1126 08:58:28.167492 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:58:28 crc kubenswrapper[4940]: I1126 08:58:28.209724 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerStarted","Data":"6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84"} Nov 26 08:58:28 crc kubenswrapper[4940]: I1126 08:58:28.209759 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 08:58:28 crc kubenswrapper[4940]: I1126 08:58:28.212103 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 26 08:58:28 crc kubenswrapper[4940]: I1126 08:58:28.253907 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.947531082 podStartE2EDuration="9.253884425s" podCreationTimestamp="2025-11-26 08:58:19 +0000 UTC" firstStartedPulling="2025-11-26 08:58:20.055310006 +0000 UTC m=+7401.575451635" lastFinishedPulling="2025-11-26 08:58:27.361663319 +0000 UTC m=+7408.881804978" observedRunningTime="2025-11-26 08:58:28.242088085 +0000 UTC m=+7409.762229704" watchObservedRunningTime="2025-11-26 08:58:28.253884425 +0000 UTC m=+7409.774026054" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.025153 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-j7fd4"] Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.027264 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.036785 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-bdsxv"] Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.057798 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-j7fd4"] Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.067192 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-bdsxv"] Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.107095 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67n5q\" (UniqueName: \"kubernetes.io/projected/04e6903d-de70-4c7b-b9aa-d245708db3fc-kube-api-access-67n5q\") pod \"aodh-db-create-j7fd4\" (UID: \"04e6903d-de70-4c7b-b9aa-d245708db3fc\") " pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.107427 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04e6903d-de70-4c7b-b9aa-d245708db3fc-operator-scripts\") pod \"aodh-db-create-j7fd4\" (UID: \"04e6903d-de70-4c7b-b9aa-d245708db3fc\") " pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.134125 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-68c8-account-create-update-r5xqd"] Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.136053 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.138111 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.147060 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-68c8-account-create-update-r5xqd"] Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.209202 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-operator-scripts\") pod \"aodh-68c8-account-create-update-r5xqd\" (UID: \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\") " pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.209585 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hxkd\" (UniqueName: \"kubernetes.io/projected/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-kube-api-access-5hxkd\") pod \"aodh-68c8-account-create-update-r5xqd\" (UID: \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\") " pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.209693 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67n5q\" (UniqueName: \"kubernetes.io/projected/04e6903d-de70-4c7b-b9aa-d245708db3fc-kube-api-access-67n5q\") pod \"aodh-db-create-j7fd4\" (UID: \"04e6903d-de70-4c7b-b9aa-d245708db3fc\") " pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.209729 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04e6903d-de70-4c7b-b9aa-d245708db3fc-operator-scripts\") pod 
\"aodh-db-create-j7fd4\" (UID: \"04e6903d-de70-4c7b-b9aa-d245708db3fc\") " pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.210532 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04e6903d-de70-4c7b-b9aa-d245708db3fc-operator-scripts\") pod \"aodh-db-create-j7fd4\" (UID: \"04e6903d-de70-4c7b-b9aa-d245708db3fc\") " pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.228080 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67n5q\" (UniqueName: \"kubernetes.io/projected/04e6903d-de70-4c7b-b9aa-d245708db3fc-kube-api-access-67n5q\") pod \"aodh-db-create-j7fd4\" (UID: \"04e6903d-de70-4c7b-b9aa-d245708db3fc\") " pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.311964 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-operator-scripts\") pod \"aodh-68c8-account-create-update-r5xqd\" (UID: \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\") " pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.312179 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hxkd\" (UniqueName: \"kubernetes.io/projected/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-kube-api-access-5hxkd\") pod \"aodh-68c8-account-create-update-r5xqd\" (UID: \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\") " pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.312843 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-operator-scripts\") pod \"aodh-68c8-account-create-update-r5xqd\" (UID: \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\") " pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.329541 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hxkd\" (UniqueName: \"kubernetes.io/projected/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-kube-api-access-5hxkd\") pod \"aodh-68c8-account-create-update-r5xqd\" (UID: \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\") " pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.350751 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.454222 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:32 crc kubenswrapper[4940]: W1126 08:58:32.869097 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04e6903d_de70_4c7b_b9aa_d245708db3fc.slice/crio-aae3715a2676e7de05aa151fcb227b0b78b7fa9a73d9794e441ac3c26ea70bb0 WatchSource:0}: Error finding container aae3715a2676e7de05aa151fcb227b0b78b7fa9a73d9794e441ac3c26ea70bb0: Status 404 returned error can't find the container with id aae3715a2676e7de05aa151fcb227b0b78b7fa9a73d9794e441ac3c26ea70bb0 Nov 26 08:58:32 crc kubenswrapper[4940]: I1126 08:58:32.878137 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-j7fd4"] Nov 26 08:58:32 crc kubenswrapper[4940]: W1126 08:58:32.997972 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5160b9cf_50dd_4e95_a8b4_b0847dbb687e.slice/crio-4ba2dcb987f4294b6235e1da3b11dfe1f5425926c3ae3d865e2d93fda0afc339 WatchSource:0}: Error finding container 4ba2dcb987f4294b6235e1da3b11dfe1f5425926c3ae3d865e2d93fda0afc339: Status 404 returned error can't find the container with id 4ba2dcb987f4294b6235e1da3b11dfe1f5425926c3ae3d865e2d93fda0afc339 Nov 26 08:58:33 crc kubenswrapper[4940]: I1126 08:58:33.001800 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-68c8-account-create-update-r5xqd"] Nov 26 08:58:33 crc kubenswrapper[4940]: I1126 08:58:33.178348 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da9627dd-e950-47ee-93a6-3d6f10627b5d" path="/var/lib/kubelet/pods/da9627dd-e950-47ee-93a6-3d6f10627b5d/volumes" Nov 26 08:58:33 crc kubenswrapper[4940]: I1126 08:58:33.252429 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-68c8-account-create-update-r5xqd" event={"ID":"5160b9cf-50dd-4e95-a8b4-b0847dbb687e","Type":"ContainerStarted","Data":"91727520c673b658e892a75735af20c2f64b87bc61fefaa5d9db70480e72e25d"} Nov 26 08:58:33 crc kubenswrapper[4940]: I1126 08:58:33.252483 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-68c8-account-create-update-r5xqd" event={"ID":"5160b9cf-50dd-4e95-a8b4-b0847dbb687e","Type":"ContainerStarted","Data":"4ba2dcb987f4294b6235e1da3b11dfe1f5425926c3ae3d865e2d93fda0afc339"} Nov 26 08:58:33 crc kubenswrapper[4940]: I1126 08:58:33.255949 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-j7fd4" event={"ID":"04e6903d-de70-4c7b-b9aa-d245708db3fc","Type":"ContainerStarted","Data":"91d0469357e39e349bd300b71a5b64621f84ced0690d71a325fc889eb7dcace6"} Nov 26 08:58:33 crc kubenswrapper[4940]: I1126 08:58:33.255986 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-j7fd4" event={"ID":"04e6903d-de70-4c7b-b9aa-d245708db3fc","Type":"ContainerStarted","Data":"aae3715a2676e7de05aa151fcb227b0b78b7fa9a73d9794e441ac3c26ea70bb0"} Nov 26 08:58:33 crc kubenswrapper[4940]: I1126 08:58:33.275512 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-68c8-account-create-update-r5xqd" podStartSLOduration=1.275485486 podStartE2EDuration="1.275485486s" podCreationTimestamp="2025-11-26 08:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:58:33.267327204 +0000 UTC m=+7414.787468823" watchObservedRunningTime="2025-11-26 08:58:33.275485486 
+0000 UTC m=+7414.795627115" Nov 26 08:58:33 crc kubenswrapper[4940]: I1126 08:58:33.299110 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-j7fd4" podStartSLOduration=1.299090435 podStartE2EDuration="1.299090435s" podCreationTimestamp="2025-11-26 08:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:58:33.279822935 +0000 UTC m=+7414.799964574" watchObservedRunningTime="2025-11-26 08:58:33.299090435 +0000 UTC m=+7414.819232084" Nov 26 08:58:34 crc kubenswrapper[4940]: I1126 08:58:34.268700 4940 generic.go:334] "Generic (PLEG): container finished" podID="5160b9cf-50dd-4e95-a8b4-b0847dbb687e" containerID="91727520c673b658e892a75735af20c2f64b87bc61fefaa5d9db70480e72e25d" exitCode=0 Nov 26 08:58:34 crc kubenswrapper[4940]: I1126 08:58:34.268776 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-68c8-account-create-update-r5xqd" event={"ID":"5160b9cf-50dd-4e95-a8b4-b0847dbb687e","Type":"ContainerDied","Data":"91727520c673b658e892a75735af20c2f64b87bc61fefaa5d9db70480e72e25d"} Nov 26 08:58:34 crc kubenswrapper[4940]: I1126 08:58:34.273279 4940 generic.go:334] "Generic (PLEG): container finished" podID="04e6903d-de70-4c7b-b9aa-d245708db3fc" containerID="91d0469357e39e349bd300b71a5b64621f84ced0690d71a325fc889eb7dcace6" exitCode=0 Nov 26 08:58:34 crc kubenswrapper[4940]: I1126 08:58:34.273320 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-j7fd4" event={"ID":"04e6903d-de70-4c7b-b9aa-d245708db3fc","Type":"ContainerDied","Data":"91d0469357e39e349bd300b71a5b64621f84ced0690d71a325fc889eb7dcace6"} Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.815360 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.822378 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.911819 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-operator-scripts\") pod \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\" (UID: \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\") " Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.911892 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04e6903d-de70-4c7b-b9aa-d245708db3fc-operator-scripts\") pod \"04e6903d-de70-4c7b-b9aa-d245708db3fc\" (UID: \"04e6903d-de70-4c7b-b9aa-d245708db3fc\") " Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.912022 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67n5q\" (UniqueName: \"kubernetes.io/projected/04e6903d-de70-4c7b-b9aa-d245708db3fc-kube-api-access-67n5q\") pod \"04e6903d-de70-4c7b-b9aa-d245708db3fc\" (UID: \"04e6903d-de70-4c7b-b9aa-d245708db3fc\") " Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.912074 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hxkd\" (UniqueName: \"kubernetes.io/projected/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-kube-api-access-5hxkd\") pod \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\" (UID: \"5160b9cf-50dd-4e95-a8b4-b0847dbb687e\") " Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.912556 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5160b9cf-50dd-4e95-a8b4-b0847dbb687e" (UID: "5160b9cf-50dd-4e95-a8b4-b0847dbb687e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.912655 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04e6903d-de70-4c7b-b9aa-d245708db3fc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "04e6903d-de70-4c7b-b9aa-d245708db3fc" (UID: "04e6903d-de70-4c7b-b9aa-d245708db3fc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.917630 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04e6903d-de70-4c7b-b9aa-d245708db3fc-kube-api-access-67n5q" (OuterVolumeSpecName: "kube-api-access-67n5q") pod "04e6903d-de70-4c7b-b9aa-d245708db3fc" (UID: "04e6903d-de70-4c7b-b9aa-d245708db3fc"). InnerVolumeSpecName "kube-api-access-67n5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:58:35 crc kubenswrapper[4940]: I1126 08:58:35.919357 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-kube-api-access-5hxkd" (OuterVolumeSpecName: "kube-api-access-5hxkd") pod "5160b9cf-50dd-4e95-a8b4-b0847dbb687e" (UID: "5160b9cf-50dd-4e95-a8b4-b0847dbb687e"). InnerVolumeSpecName "kube-api-access-5hxkd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.014433 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.014469 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04e6903d-de70-4c7b-b9aa-d245708db3fc-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.014480 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67n5q\" (UniqueName: \"kubernetes.io/projected/04e6903d-de70-4c7b-b9aa-d245708db3fc-kube-api-access-67n5q\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.014490 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hxkd\" (UniqueName: \"kubernetes.io/projected/5160b9cf-50dd-4e95-a8b4-b0847dbb687e-kube-api-access-5hxkd\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.296810 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-68c8-account-create-update-r5xqd" event={"ID":"5160b9cf-50dd-4e95-a8b4-b0847dbb687e","Type":"ContainerDied","Data":"4ba2dcb987f4294b6235e1da3b11dfe1f5425926c3ae3d865e2d93fda0afc339"} Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.296853 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ba2dcb987f4294b6235e1da3b11dfe1f5425926c3ae3d865e2d93fda0afc339" Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.296910 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-68c8-account-create-update-r5xqd" Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.308720 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-j7fd4" event={"ID":"04e6903d-de70-4c7b-b9aa-d245708db3fc","Type":"ContainerDied","Data":"aae3715a2676e7de05aa151fcb227b0b78b7fa9a73d9794e441ac3c26ea70bb0"} Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.308766 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aae3715a2676e7de05aa151fcb227b0b78b7fa9a73d9794e441ac3c26ea70bb0" Nov 26 08:58:36 crc kubenswrapper[4940]: I1126 08:58:36.308835 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-j7fd4" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.418157 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-5jzzk"] Nov 26 08:58:37 crc kubenswrapper[4940]: E1126 08:58:37.418881 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5160b9cf-50dd-4e95-a8b4-b0847dbb687e" containerName="mariadb-account-create-update" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.418896 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5160b9cf-50dd-4e95-a8b4-b0847dbb687e" containerName="mariadb-account-create-update" Nov 26 08:58:37 crc kubenswrapper[4940]: E1126 08:58:37.418910 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e6903d-de70-4c7b-b9aa-d245708db3fc" containerName="mariadb-database-create" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.418916 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e6903d-de70-4c7b-b9aa-d245708db3fc" containerName="mariadb-database-create" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.419113 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5160b9cf-50dd-4e95-a8b4-b0847dbb687e" containerName="mariadb-account-create-update" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.419134 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="04e6903d-de70-4c7b-b9aa-d245708db3fc" containerName="mariadb-database-create" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.419863 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.421905 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-stk99" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.421912 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.422397 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.422615 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.431442 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-5jzzk"] Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.552765 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-scripts\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.553157 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h7j8\" (UniqueName: \"kubernetes.io/projected/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-kube-api-access-5h7j8\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.553293 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-config-data\") pod \"aodh-db-sync-5jzzk\" (UID: 
\"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.553420 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-combined-ca-bundle\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.654845 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-scripts\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.654982 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h7j8\" (UniqueName: \"kubernetes.io/projected/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-kube-api-access-5h7j8\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.655054 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-config-data\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.655113 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-combined-ca-bundle\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.660785 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-scripts\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.661427 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-config-data\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.673803 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-combined-ca-bundle\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.674985 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h7j8\" (UniqueName: \"kubernetes.io/projected/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-kube-api-access-5h7j8\") pod \"aodh-db-sync-5jzzk\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:37 crc kubenswrapper[4940]: I1126 08:58:37.745196 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:38 crc kubenswrapper[4940]: I1126 08:58:38.198775 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-5jzzk"] Nov 26 08:58:38 crc kubenswrapper[4940]: I1126 08:58:38.340885 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-5jzzk" event={"ID":"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce","Type":"ContainerStarted","Data":"87b8448d85f3f62419575134a85d2d60192ae18ee1ca9dd60b7bfc11dc55967b"} Nov 26 08:58:42 crc kubenswrapper[4940]: I1126 08:58:42.166584 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:58:42 crc kubenswrapper[4940]: E1126 08:58:42.167247 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:58:43 crc kubenswrapper[4940]: I1126 08:58:43.397793 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-5jzzk" event={"ID":"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce","Type":"ContainerStarted","Data":"5d2ee2cab527127cfc2ce78991953831c352791e9763cb4611646ec99f2dfc80"} Nov 26 08:58:43 crc kubenswrapper[4940]: I1126 08:58:43.430963 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-5jzzk" podStartSLOduration=2.294799163 podStartE2EDuration="6.430928166s" podCreationTimestamp="2025-11-26 08:58:37 +0000 UTC" firstStartedPulling="2025-11-26 08:58:38.196927879 +0000 UTC m=+7419.717069498" lastFinishedPulling="2025-11-26 08:58:42.333056882 +0000 UTC m=+7423.853198501" observedRunningTime="2025-11-26 08:58:43.421289737 +0000 UTC m=+7424.941431396" watchObservedRunningTime="2025-11-26 08:58:43.430928166 +0000 UTC m=+7424.951069785" Nov 26 08:58:45 crc kubenswrapper[4940]: I1126 08:58:45.426939 4940 generic.go:334] "Generic (PLEG): container finished" podID="36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" containerID="5d2ee2cab527127cfc2ce78991953831c352791e9763cb4611646ec99f2dfc80" exitCode=0 Nov 26 08:58:45 crc kubenswrapper[4940]: I1126 08:58:45.426983 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-5jzzk" event={"ID":"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce","Type":"ContainerDied","Data":"5d2ee2cab527127cfc2ce78991953831c352791e9763cb4611646ec99f2dfc80"} Nov 26 08:58:46 crc kubenswrapper[4940]: I1126 08:58:46.851940 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:46 crc kubenswrapper[4940]: I1126 08:58:46.960450 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-scripts\") pod \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " Nov 26 08:58:46 crc kubenswrapper[4940]: I1126 08:58:46.960641 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-config-data\") pod \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " Nov 26 08:58:46 crc kubenswrapper[4940]: I1126 08:58:46.960750 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-combined-ca-bundle\") pod \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " Nov 26 08:58:46 crc kubenswrapper[4940]: I1126 08:58:46.960798 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5h7j8\" (UniqueName: \"kubernetes.io/projected/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-kube-api-access-5h7j8\") pod \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\" (UID: \"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce\") " Nov 26 08:58:46 crc kubenswrapper[4940]: I1126 08:58:46.967427 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-kube-api-access-5h7j8" (OuterVolumeSpecName: "kube-api-access-5h7j8") pod "36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" (UID: "36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce"). InnerVolumeSpecName "kube-api-access-5h7j8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:58:46 crc kubenswrapper[4940]: I1126 08:58:46.969426 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-scripts" (OuterVolumeSpecName: "scripts") pod "36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" (UID: "36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:58:46 crc kubenswrapper[4940]: I1126 08:58:46.998142 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" (UID: "36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:58:47 crc kubenswrapper[4940]: I1126 08:58:47.013552 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-config-data" (OuterVolumeSpecName: "config-data") pod "36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" (UID: "36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:58:47 crc kubenswrapper[4940]: I1126 08:58:47.063515 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:47 crc kubenswrapper[4940]: I1126 08:58:47.063557 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:47 crc kubenswrapper[4940]: I1126 08:58:47.063571 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:47 crc kubenswrapper[4940]: I1126 08:58:47.063586 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5h7j8\" (UniqueName: \"kubernetes.io/projected/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce-kube-api-access-5h7j8\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:47 crc kubenswrapper[4940]: I1126 08:58:47.462104 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-5jzzk" event={"ID":"36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce","Type":"ContainerDied","Data":"87b8448d85f3f62419575134a85d2d60192ae18ee1ca9dd60b7bfc11dc55967b"} Nov 26 08:58:47 crc kubenswrapper[4940]: I1126 08:58:47.462673 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87b8448d85f3f62419575134a85d2d60192ae18ee1ca9dd60b7bfc11dc55967b" Nov 26 08:58:47 crc kubenswrapper[4940]: I1126 08:58:47.462178 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-5jzzk" Nov 26 08:58:49 crc kubenswrapper[4940]: I1126 08:58:49.554527 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.558706 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 26 08:58:52 crc kubenswrapper[4940]: E1126 08:58:52.559742 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" containerName="aodh-db-sync" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.559768 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" containerName="aodh-db-sync" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.560170 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" containerName="aodh-db-sync" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.563364 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.566206 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.566735 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-stk99" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.567007 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.570976 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.687560 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-combined-ca-bundle\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.687657 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-config-data\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.687695 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-scripts\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.687787 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htch5\" (UniqueName: \"kubernetes.io/projected/717eab22-62dc-453f-b481-c30aac72c2ca-kube-api-access-htch5\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.789984 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htch5\" (UniqueName: \"kubernetes.io/projected/717eab22-62dc-453f-b481-c30aac72c2ca-kube-api-access-htch5\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.790138 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-combined-ca-bundle\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.790176 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-config-data\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.790200 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-scripts\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: 
I1126 08:58:52.798719 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-scripts\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.799261 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-config-data\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.807873 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htch5\" (UniqueName: \"kubernetes.io/projected/717eab22-62dc-453f-b481-c30aac72c2ca-kube-api-access-htch5\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.832683 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/717eab22-62dc-453f-b481-c30aac72c2ca-combined-ca-bundle\") pod \"aodh-0\" (UID: \"717eab22-62dc-453f-b481-c30aac72c2ca\") " pod="openstack/aodh-0" Nov 26 08:58:52 crc kubenswrapper[4940]: I1126 08:58:52.896793 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 08:58:53 crc kubenswrapper[4940]: I1126 08:58:53.426748 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 08:58:53 crc kubenswrapper[4940]: I1126 08:58:53.540693 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"717eab22-62dc-453f-b481-c30aac72c2ca","Type":"ContainerStarted","Data":"49c7d680c37cf7b524dd0eb56a90e68307f3bb974e709ff3d76c5a03d3075dd7"} Nov 26 08:58:54 crc kubenswrapper[4940]: I1126 08:58:54.165317 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:58:54 crc kubenswrapper[4940]: E1126 08:58:54.165622 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:58:54 crc kubenswrapper[4940]: I1126 08:58:54.554137 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"717eab22-62dc-453f-b481-c30aac72c2ca","Type":"ContainerStarted","Data":"9afa217c20a393b9205091abc0541dfcb065651368ed924271434bd3af6cc324"} Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.152880 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.153178 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="ceilometer-central-agent" containerID="cri-o://6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25" gracePeriod=30 Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.153248 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="sg-core" containerID="cri-o://d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481" gracePeriod=30 Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.153308 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="ceilometer-notification-agent" containerID="cri-o://571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e" gracePeriod=30 Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.153324 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="proxy-httpd" containerID="cri-o://6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84" gracePeriod=30 Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.567158 4940 generic.go:334] "Generic (PLEG): container finished" podID="eef83081-e196-476b-9f7e-f39cfd51d781" containerID="6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84" exitCode=0 Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.568153 4940 generic.go:334] "Generic (PLEG): container finished" podID="eef83081-e196-476b-9f7e-f39cfd51d781" containerID="d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481" exitCode=2 Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.568171 4940 generic.go:334] "Generic (PLEG): container finished" podID="eef83081-e196-476b-9f7e-f39cfd51d781" containerID="6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25" exitCode=0 Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.567233 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerDied","Data":"6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84"} Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.568303 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerDied","Data":"d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481"} Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.568321 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerDied","Data":"6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25"} Nov 26 08:58:55 crc kubenswrapper[4940]: I1126 08:58:55.571225 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"717eab22-62dc-453f-b481-c30aac72c2ca","Type":"ContainerStarted","Data":"480b294006d54e2c946f7702918a861a3fdf281edaca11f3d9629e0cfcc093e0"} Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.296946 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.386841 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scxbx\" (UniqueName: \"kubernetes.io/projected/eef83081-e196-476b-9f7e-f39cfd51d781-kube-api-access-scxbx\") pod \"eef83081-e196-476b-9f7e-f39cfd51d781\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.386979 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-scripts\") pod \"eef83081-e196-476b-9f7e-f39cfd51d781\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.387021 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-run-httpd\") pod \"eef83081-e196-476b-9f7e-f39cfd51d781\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.387057 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-log-httpd\") pod \"eef83081-e196-476b-9f7e-f39cfd51d781\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.387109 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-combined-ca-bundle\") pod \"eef83081-e196-476b-9f7e-f39cfd51d781\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.387177 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-sg-core-conf-yaml\") pod \"eef83081-e196-476b-9f7e-f39cfd51d781\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.387241 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-config-data\") pod \"eef83081-e196-476b-9f7e-f39cfd51d781\" (UID: \"eef83081-e196-476b-9f7e-f39cfd51d781\") " Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.387795 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "eef83081-e196-476b-9f7e-f39cfd51d781" (UID: "eef83081-e196-476b-9f7e-f39cfd51d781"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.387989 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "eef83081-e196-476b-9f7e-f39cfd51d781" (UID: "eef83081-e196-476b-9f7e-f39cfd51d781"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.394147 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-scripts" (OuterVolumeSpecName: "scripts") pod "eef83081-e196-476b-9f7e-f39cfd51d781" (UID: "eef83081-e196-476b-9f7e-f39cfd51d781"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.399635 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eef83081-e196-476b-9f7e-f39cfd51d781-kube-api-access-scxbx" (OuterVolumeSpecName: "kube-api-access-scxbx") pod "eef83081-e196-476b-9f7e-f39cfd51d781" (UID: "eef83081-e196-476b-9f7e-f39cfd51d781"). InnerVolumeSpecName "kube-api-access-scxbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.461689 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "eef83081-e196-476b-9f7e-f39cfd51d781" (UID: "eef83081-e196-476b-9f7e-f39cfd51d781"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.489413 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scxbx\" (UniqueName: \"kubernetes.io/projected/eef83081-e196-476b-9f7e-f39cfd51d781-kube-api-access-scxbx\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.489454 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.489465 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.489474 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eef83081-e196-476b-9f7e-f39cfd51d781-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.489482 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.504192 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-config-data" (OuterVolumeSpecName: "config-data") pod "eef83081-e196-476b-9f7e-f39cfd51d781" (UID: "eef83081-e196-476b-9f7e-f39cfd51d781"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.512354 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eef83081-e196-476b-9f7e-f39cfd51d781" (UID: "eef83081-e196-476b-9f7e-f39cfd51d781"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.591545 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.591583 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef83081-e196-476b-9f7e-f39cfd51d781-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.594708 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"717eab22-62dc-453f-b481-c30aac72c2ca","Type":"ContainerStarted","Data":"4ed829e356eb59b7a19c1e554fb9dc90509df61d828564956cbf61555be97cd1"} Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.601408 4940 generic.go:334] "Generic (PLEG): container finished" podID="eef83081-e196-476b-9f7e-f39cfd51d781" containerID="571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e" exitCode=0 Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.601474 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerDied","Data":"571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e"} Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.601528 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eef83081-e196-476b-9f7e-f39cfd51d781","Type":"ContainerDied","Data":"3cfdcabef67f2f92641b152f64d44f043fd04487e42ba9ddbe6a33673abdda2a"} Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.601551 4940 scope.go:117] "RemoveContainer" containerID="6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.601767 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.633136 4940 scope.go:117] "RemoveContainer" containerID="d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.651530 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.680166 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.685858 4940 scope.go:117] "RemoveContainer" containerID="571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.692572 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:58:57 crc kubenswrapper[4940]: E1126 08:58:57.697558 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="ceilometer-central-agent" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.697602 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="ceilometer-central-agent" Nov 26 08:58:57 crc kubenswrapper[4940]: E1126 08:58:57.697619 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="proxy-httpd" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.697629 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="proxy-httpd" Nov 26 08:58:57 crc kubenswrapper[4940]: E1126 08:58:57.697650 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="sg-core" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.697657 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="sg-core" Nov 26 08:58:57 crc kubenswrapper[4940]: E1126 08:58:57.697671 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="ceilometer-notification-agent" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.697677 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="ceilometer-notification-agent" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.697865 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="ceilometer-central-agent" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.697882 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="proxy-httpd" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.697893 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="sg-core" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.697921 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" containerName="ceilometer-notification-agent" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.702208 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.705367 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.707744 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.707796 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.795948 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.796095 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-config-data\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.796234 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9pz9\" (UniqueName: \"kubernetes.io/projected/fb9106b6-499d-42d5-a7ac-7ab522135d91-kube-api-access-p9pz9\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.796318 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.796360 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-run-httpd\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.796388 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-scripts\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.796439 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-log-httpd\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.897978 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.898324 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-run-httpd\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.898426 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-scripts\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.898547 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-log-httpd\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.898627 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.898735 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-config-data\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.898887 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9pz9\" (UniqueName: \"kubernetes.io/projected/fb9106b6-499d-42d5-a7ac-7ab522135d91-kube-api-access-p9pz9\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.900499 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-log-httpd\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.900587 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-run-httpd\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.905241 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-config-data\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.906268 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-scripts\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.908534 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.913882 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9pz9\" (UniqueName: \"kubernetes.io/projected/fb9106b6-499d-42d5-a7ac-7ab522135d91-kube-api-access-p9pz9\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:57 crc kubenswrapper[4940]: I1126 08:58:57.916179 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " pod="openstack/ceilometer-0" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.024936 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.203308 4940 scope.go:117] "RemoveContainer" containerID="6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.284504 4940 scope.go:117] "RemoveContainer" containerID="6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84" Nov 26 08:58:58 crc kubenswrapper[4940]: E1126 08:58:58.285563 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84\": container with ID starting with 6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84 not found: ID does not exist" containerID="6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.285621 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84"} err="failed to get container status \"6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84\": rpc error: code = NotFound desc = could not find container \"6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84\": container with ID starting with 6b63f64c85f7d94c25609297c30a956932580de50133149ab3c9b8590b2a2d84 not found: ID does not exist" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.285655 4940 scope.go:117] "RemoveContainer" containerID="d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481" Nov 26 08:58:58 crc kubenswrapper[4940]: E1126 08:58:58.286299 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481\": container with ID starting with d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481 not found: ID does not exist" containerID="d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.286338 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481"} err="failed to get container status \"d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481\": rpc error: code = NotFound desc = could not find container \"d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481\": container with ID starting with 
d2f2062c2d11e65767e4f3783f237a26711050d130da1b32f7382af50ffdb481 not found: ID does not exist" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.286364 4940 scope.go:117] "RemoveContainer" containerID="571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e" Nov 26 08:58:58 crc kubenswrapper[4940]: E1126 08:58:58.286681 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e\": container with ID starting with 571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e not found: ID does not exist" containerID="571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.286724 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e"} err="failed to get container status \"571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e\": rpc error: code = NotFound desc = could not find container \"571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e\": container with ID starting with 571f59922254359167569901a065cf764ac2ea153eb22da9af7957d162ba5f7e not found: ID does not exist" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.286748 4940 scope.go:117] "RemoveContainer" containerID="6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25" Nov 26 08:58:58 crc kubenswrapper[4940]: E1126 08:58:58.287152 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25\": container with ID starting with 6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25 not found: ID does not exist" containerID="6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.287193 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25"} err="failed to get container status \"6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25\": rpc error: code = NotFound desc = could not find container \"6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25\": container with ID starting with 6a0ca8d8f7df8959559cb8a7cb8bb6b2efd2a414b5b087103452012b9d863b25 not found: ID does not exist" Nov 26 08:58:58 crc kubenswrapper[4940]: I1126 08:58:58.764596 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:58:58 crc kubenswrapper[4940]: W1126 08:58:58.801354 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb9106b6_499d_42d5_a7ac_7ab522135d91.slice/crio-0d80df9f7cf780908c025bd0160142be3da8772349adaff82afeda07c2fd1528 WatchSource:0}: Error finding container 0d80df9f7cf780908c025bd0160142be3da8772349adaff82afeda07c2fd1528: Status 404 returned error can't find the container with id 0d80df9f7cf780908c025bd0160142be3da8772349adaff82afeda07c2fd1528 Nov 26 08:58:59 crc kubenswrapper[4940]: I1126 08:58:59.183007 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eef83081-e196-476b-9f7e-f39cfd51d781" path="/var/lib/kubelet/pods/eef83081-e196-476b-9f7e-f39cfd51d781/volumes" Nov 26 08:58:59 crc kubenswrapper[4940]: I1126 
08:58:59.626917 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerStarted","Data":"0d80df9f7cf780908c025bd0160142be3da8772349adaff82afeda07c2fd1528"} Nov 26 08:59:05 crc kubenswrapper[4940]: I1126 08:59:05.053427 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-984f-account-create-update-ht6qm"] Nov 26 08:59:05 crc kubenswrapper[4940]: I1126 08:59:05.066960 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-ktrvj"] Nov 26 08:59:05 crc kubenswrapper[4940]: I1126 08:59:05.081760 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-984f-account-create-update-ht6qm"] Nov 26 08:59:05 crc kubenswrapper[4940]: I1126 08:59:05.089929 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-ktrvj"] Nov 26 08:59:05 crc kubenswrapper[4940]: I1126 08:59:05.181411 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37c5efdc-60d5-4bf3-b84f-84459d2ba431" path="/var/lib/kubelet/pods/37c5efdc-60d5-4bf3-b84f-84459d2ba431/volumes" Nov 26 08:59:05 crc kubenswrapper[4940]: I1126 08:59:05.183235 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="623678ef-a5c7-48a1-ac34-f65b60fd7d54" path="/var/lib/kubelet/pods/623678ef-a5c7-48a1-ac34-f65b60fd7d54/volumes" Nov 26 08:59:07 crc kubenswrapper[4940]: I1126 08:59:07.165962 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:59:07 crc kubenswrapper[4940]: E1126 08:59:07.166837 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:59:07 crc kubenswrapper[4940]: I1126 08:59:07.720283 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"717eab22-62dc-453f-b481-c30aac72c2ca","Type":"ContainerStarted","Data":"b9f1d8f4def46289461157ff7fcf91a5aa2b0280a614c096c6b968b29076af5e"} Nov 26 08:59:07 crc kubenswrapper[4940]: I1126 08:59:07.723359 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerStarted","Data":"8c0f9554e6ab17408a1f1c872b29999fbf74f97a19aa9b5aae157994b35bcff7"} Nov 26 08:59:07 crc kubenswrapper[4940]: I1126 08:59:07.723408 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerStarted","Data":"25da152aad697c9acf066a165ee837505c27424621513e0784ac56a05f7fff52"} Nov 26 08:59:07 crc kubenswrapper[4940]: I1126 08:59:07.759846 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.777748387 podStartE2EDuration="15.759825895s" podCreationTimestamp="2025-11-26 08:58:52 +0000 UTC" firstStartedPulling="2025-11-26 08:58:53.455219332 +0000 UTC m=+7434.975360951" lastFinishedPulling="2025-11-26 08:59:06.43729685 +0000 UTC m=+7447.957438459" observedRunningTime="2025-11-26 08:59:07.748421349 +0000 UTC m=+7449.268562968" watchObservedRunningTime="2025-11-26 
08:59:07.759825895 +0000 UTC m=+7449.279967514" Nov 26 08:59:08 crc kubenswrapper[4940]: I1126 08:59:08.736211 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerStarted","Data":"dd9e8c6230bd914b78ae2d1a2f6186755df23f3ec52c13ab2e8ca8453b94a992"} Nov 26 08:59:10 crc kubenswrapper[4940]: I1126 08:59:10.764584 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerStarted","Data":"3753b63fd2840eac999c436c7e8d5a13044b933eb1e06494d6ee45f4caad1475"} Nov 26 08:59:10 crc kubenswrapper[4940]: I1126 08:59:10.765051 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 08:59:10 crc kubenswrapper[4940]: I1126 08:59:10.800166 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.017271163 podStartE2EDuration="13.80014385s" podCreationTimestamp="2025-11-26 08:58:57 +0000 UTC" firstStartedPulling="2025-11-26 08:58:58.803902096 +0000 UTC m=+7440.324043715" lastFinishedPulling="2025-11-26 08:59:09.586774783 +0000 UTC m=+7451.106916402" observedRunningTime="2025-11-26 08:59:10.788682691 +0000 UTC m=+7452.308824320" watchObservedRunningTime="2025-11-26 08:59:10.80014385 +0000 UTC m=+7452.320285469" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.135528 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-8ww7r"] Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.137001 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.157011 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-8ww7r"] Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.200180 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a9767e-6575-42eb-a5fd-15b4b24ce12e-operator-scripts\") pod \"manila-db-create-8ww7r\" (UID: \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\") " pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.200371 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhs2t\" (UniqueName: \"kubernetes.io/projected/90a9767e-6575-42eb-a5fd-15b4b24ce12e-kube-api-access-dhs2t\") pod \"manila-db-create-8ww7r\" (UID: \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\") " pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.263876 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-bb8e-account-create-update-rxrfg"] Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.265212 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.267312 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.276856 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-bb8e-account-create-update-rxrfg"] Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.318009 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhs2t\" (UniqueName: \"kubernetes.io/projected/90a9767e-6575-42eb-a5fd-15b4b24ce12e-kube-api-access-dhs2t\") pod \"manila-db-create-8ww7r\" (UID: \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\") " pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.318405 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a83f6edc-021b-4530-a459-13a1cacd0e9b-operator-scripts\") pod \"manila-bb8e-account-create-update-rxrfg\" (UID: \"a83f6edc-021b-4530-a459-13a1cacd0e9b\") " pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.318667 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a9767e-6575-42eb-a5fd-15b4b24ce12e-operator-scripts\") pod \"manila-db-create-8ww7r\" (UID: \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\") " pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.321884 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a9767e-6575-42eb-a5fd-15b4b24ce12e-operator-scripts\") pod \"manila-db-create-8ww7r\" (UID: \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\") " pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.322239 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p9ct\" (UniqueName: \"kubernetes.io/projected/a83f6edc-021b-4530-a459-13a1cacd0e9b-kube-api-access-4p9ct\") pod \"manila-bb8e-account-create-update-rxrfg\" (UID: \"a83f6edc-021b-4530-a459-13a1cacd0e9b\") " pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.344563 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhs2t\" (UniqueName: \"kubernetes.io/projected/90a9767e-6575-42eb-a5fd-15b4b24ce12e-kube-api-access-dhs2t\") pod \"manila-db-create-8ww7r\" (UID: \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\") " pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.424616 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a83f6edc-021b-4530-a459-13a1cacd0e9b-operator-scripts\") pod \"manila-bb8e-account-create-update-rxrfg\" (UID: \"a83f6edc-021b-4530-a459-13a1cacd0e9b\") " pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.424758 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p9ct\" (UniqueName: \"kubernetes.io/projected/a83f6edc-021b-4530-a459-13a1cacd0e9b-kube-api-access-4p9ct\") pod 
\"manila-bb8e-account-create-update-rxrfg\" (UID: \"a83f6edc-021b-4530-a459-13a1cacd0e9b\") " pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.425327 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a83f6edc-021b-4530-a459-13a1cacd0e9b-operator-scripts\") pod \"manila-bb8e-account-create-update-rxrfg\" (UID: \"a83f6edc-021b-4530-a459-13a1cacd0e9b\") " pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.447777 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p9ct\" (UniqueName: \"kubernetes.io/projected/a83f6edc-021b-4530-a459-13a1cacd0e9b-kube-api-access-4p9ct\") pod \"manila-bb8e-account-create-update-rxrfg\" (UID: \"a83f6edc-021b-4530-a459-13a1cacd0e9b\") " pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.455787 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:12 crc kubenswrapper[4940]: I1126 08:59:12.609977 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.041132 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-8ww7r"] Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.251622 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-bb8e-account-create-update-rxrfg"] Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.687393 4940 scope.go:117] "RemoveContainer" containerID="cfa131c65c040ecbbf657175a4367c918a0f73bb202f333239949aee6bde1f75" Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.708376 4940 scope.go:117] "RemoveContainer" containerID="44d9771090f0e1948df63e142bdf4b6ac9cbabfa67185f72108fbb55a8c7037d" Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.749571 4940 scope.go:117] "RemoveContainer" containerID="bf9dd3dba79c071c7e7825c1be144a898ad7799b708b3f6d19a7cb61c4772639" Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.841661 4940 generic.go:334] "Generic (PLEG): container finished" podID="90a9767e-6575-42eb-a5fd-15b4b24ce12e" containerID="6474e80085ddce58dc6f946c50ba1c520fded4a4ae03eca65358f56b4c38f263" exitCode=0 Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.841715 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-8ww7r" event={"ID":"90a9767e-6575-42eb-a5fd-15b4b24ce12e","Type":"ContainerDied","Data":"6474e80085ddce58dc6f946c50ba1c520fded4a4ae03eca65358f56b4c38f263"} Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.841736 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-8ww7r" event={"ID":"90a9767e-6575-42eb-a5fd-15b4b24ce12e","Type":"ContainerStarted","Data":"2170d587cfc068adb8d8fe93692e259e135dde613cb627a9753cfa4a16b10899"} Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.849346 4940 generic.go:334] "Generic (PLEG): container finished" podID="a83f6edc-021b-4530-a459-13a1cacd0e9b" containerID="39aa83f6c8d4075b792cc1f0e9c2c9955a870f9f1c9e38e020d6dbc8800ee30c" exitCode=0 Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.849411 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-bb8e-account-create-update-rxrfg" 
event={"ID":"a83f6edc-021b-4530-a459-13a1cacd0e9b","Type":"ContainerDied","Data":"39aa83f6c8d4075b792cc1f0e9c2c9955a870f9f1c9e38e020d6dbc8800ee30c"} Nov 26 08:59:13 crc kubenswrapper[4940]: I1126 08:59:13.849435 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-bb8e-account-create-update-rxrfg" event={"ID":"a83f6edc-021b-4530-a459-13a1cacd0e9b","Type":"ContainerStarted","Data":"95e7d59e758e10d766949a596ceb29fdcd6bf1415f587787dfa26d115cfc38da"} Nov 26 08:59:14 crc kubenswrapper[4940]: I1126 08:59:14.035711 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-4rr8h"] Nov 26 08:59:14 crc kubenswrapper[4940]: I1126 08:59:14.044763 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-4rr8h"] Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.180809 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1111ad8a-5b85-45ea-9bf7-693eef0556db" path="/var/lib/kubelet/pods/1111ad8a-5b85-45ea-9bf7-693eef0556db/volumes" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.389548 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.394922 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.588305 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a83f6edc-021b-4530-a459-13a1cacd0e9b-operator-scripts\") pod \"a83f6edc-021b-4530-a459-13a1cacd0e9b\" (UID: \"a83f6edc-021b-4530-a459-13a1cacd0e9b\") " Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.588845 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a83f6edc-021b-4530-a459-13a1cacd0e9b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a83f6edc-021b-4530-a459-13a1cacd0e9b" (UID: "a83f6edc-021b-4530-a459-13a1cacd0e9b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.589205 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a9767e-6575-42eb-a5fd-15b4b24ce12e-operator-scripts\") pod \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\" (UID: \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\") " Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.589512 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhs2t\" (UniqueName: \"kubernetes.io/projected/90a9767e-6575-42eb-a5fd-15b4b24ce12e-kube-api-access-dhs2t\") pod \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\" (UID: \"90a9767e-6575-42eb-a5fd-15b4b24ce12e\") " Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.590269 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p9ct\" (UniqueName: \"kubernetes.io/projected/a83f6edc-021b-4530-a459-13a1cacd0e9b-kube-api-access-4p9ct\") pod \"a83f6edc-021b-4530-a459-13a1cacd0e9b\" (UID: \"a83f6edc-021b-4530-a459-13a1cacd0e9b\") " Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.589870 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90a9767e-6575-42eb-a5fd-15b4b24ce12e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "90a9767e-6575-42eb-a5fd-15b4b24ce12e" (UID: "90a9767e-6575-42eb-a5fd-15b4b24ce12e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.592469 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a83f6edc-021b-4530-a459-13a1cacd0e9b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.592604 4940 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a9767e-6575-42eb-a5fd-15b4b24ce12e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.600372 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a83f6edc-021b-4530-a459-13a1cacd0e9b-kube-api-access-4p9ct" (OuterVolumeSpecName: "kube-api-access-4p9ct") pod "a83f6edc-021b-4530-a459-13a1cacd0e9b" (UID: "a83f6edc-021b-4530-a459-13a1cacd0e9b"). InnerVolumeSpecName "kube-api-access-4p9ct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.600435 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90a9767e-6575-42eb-a5fd-15b4b24ce12e-kube-api-access-dhs2t" (OuterVolumeSpecName: "kube-api-access-dhs2t") pod "90a9767e-6575-42eb-a5fd-15b4b24ce12e" (UID: "90a9767e-6575-42eb-a5fd-15b4b24ce12e"). InnerVolumeSpecName "kube-api-access-dhs2t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.694585 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p9ct\" (UniqueName: \"kubernetes.io/projected/a83f6edc-021b-4530-a459-13a1cacd0e9b-kube-api-access-4p9ct\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.694624 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhs2t\" (UniqueName: \"kubernetes.io/projected/90a9767e-6575-42eb-a5fd-15b4b24ce12e-kube-api-access-dhs2t\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.874411 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-8ww7r" event={"ID":"90a9767e-6575-42eb-a5fd-15b4b24ce12e","Type":"ContainerDied","Data":"2170d587cfc068adb8d8fe93692e259e135dde613cb627a9753cfa4a16b10899"} Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.874473 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-8ww7r" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.874482 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2170d587cfc068adb8d8fe93692e259e135dde613cb627a9753cfa4a16b10899" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.876560 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-bb8e-account-create-update-rxrfg" event={"ID":"a83f6edc-021b-4530-a459-13a1cacd0e9b","Type":"ContainerDied","Data":"95e7d59e758e10d766949a596ceb29fdcd6bf1415f587787dfa26d115cfc38da"} Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.876602 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95e7d59e758e10d766949a596ceb29fdcd6bf1415f587787dfa26d115cfc38da" Nov 26 08:59:15 crc kubenswrapper[4940]: I1126 08:59:15.876648 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-bb8e-account-create-update-rxrfg" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.531661 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-ddsss"] Nov 26 08:59:17 crc kubenswrapper[4940]: E1126 08:59:17.532796 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90a9767e-6575-42eb-a5fd-15b4b24ce12e" containerName="mariadb-database-create" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.532815 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="90a9767e-6575-42eb-a5fd-15b4b24ce12e" containerName="mariadb-database-create" Nov 26 08:59:17 crc kubenswrapper[4940]: E1126 08:59:17.532845 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a83f6edc-021b-4530-a459-13a1cacd0e9b" containerName="mariadb-account-create-update" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.532853 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a83f6edc-021b-4530-a459-13a1cacd0e9b" containerName="mariadb-account-create-update" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.533118 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a83f6edc-021b-4530-a459-13a1cacd0e9b" containerName="mariadb-account-create-update" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.533237 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="90a9767e-6575-42eb-a5fd-15b4b24ce12e" containerName="mariadb-database-create" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.534212 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.537590 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-d7qjg" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.537798 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.546311 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-ddsss"] Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.603873 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pndmx"] Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.606958 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.620811 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pndmx"] Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.634127 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-utilities\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.634172 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-job-config-data\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.634231 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5jzk\" (UniqueName: \"kubernetes.io/projected/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-kube-api-access-r5jzk\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.634262 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-combined-ca-bundle\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.634278 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnppm\" (UniqueName: \"kubernetes.io/projected/f9669e1c-3fe0-49e5-bf17-012bf16e9944-kube-api-access-gnppm\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.634310 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-catalog-content\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.634340 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-config-data\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.736249 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-catalog-content\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.736328 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-config-data\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.736408 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-utilities\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.736439 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-job-config-data\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.736504 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5jzk\" (UniqueName: \"kubernetes.io/projected/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-kube-api-access-r5jzk\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.736538 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-combined-ca-bundle\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.736554 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnppm\" (UniqueName: \"kubernetes.io/projected/f9669e1c-3fe0-49e5-bf17-012bf16e9944-kube-api-access-gnppm\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.736686 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-catalog-content\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.737366 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-utilities\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.741797 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-combined-ca-bundle\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.742208 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-config-data\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.744460 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-job-config-data\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.758015 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5jzk\" (UniqueName: \"kubernetes.io/projected/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-kube-api-access-r5jzk\") pod \"community-operators-pndmx\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.758603 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnppm\" (UniqueName: \"kubernetes.io/projected/f9669e1c-3fe0-49e5-bf17-012bf16e9944-kube-api-access-gnppm\") pod \"manila-db-sync-ddsss\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.849867 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:17 crc kubenswrapper[4940]: I1126 08:59:17.935237 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:18 crc kubenswrapper[4940]: W1126 08:59:18.484269 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab40c62c_dee2_423d_9e5d_d28bf9c8f6c8.slice/crio-71611586fe40c7d1b7e417a663cb334a9100c202c7114509e894cf8a5a4b6f7c WatchSource:0}: Error finding container 71611586fe40c7d1b7e417a663cb334a9100c202c7114509e894cf8a5a4b6f7c: Status 404 returned error can't find the container with id 71611586fe40c7d1b7e417a663cb334a9100c202c7114509e894cf8a5a4b6f7c Nov 26 08:59:18 crc kubenswrapper[4940]: I1126 08:59:18.485718 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pndmx"] Nov 26 08:59:18 crc kubenswrapper[4940]: I1126 08:59:18.681628 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-ddsss"] Nov 26 08:59:18 crc kubenswrapper[4940]: I1126 08:59:18.909238 4940 generic.go:334] "Generic (PLEG): container finished" podID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerID="9482552e818f547975dfcc8fabcd71772e698acf55cfb7039606bb4ddbf66876" exitCode=0 Nov 26 08:59:18 crc kubenswrapper[4940]: I1126 08:59:18.909307 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pndmx" event={"ID":"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8","Type":"ContainerDied","Data":"9482552e818f547975dfcc8fabcd71772e698acf55cfb7039606bb4ddbf66876"} Nov 26 08:59:18 crc kubenswrapper[4940]: I1126 08:59:18.909333 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pndmx" event={"ID":"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8","Type":"ContainerStarted","Data":"71611586fe40c7d1b7e417a663cb334a9100c202c7114509e894cf8a5a4b6f7c"} Nov 26 08:59:18 crc kubenswrapper[4940]: I1126 08:59:18.922609 4940 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-ddsss" event={"ID":"f9669e1c-3fe0-49e5-bf17-012bf16e9944","Type":"ContainerStarted","Data":"d2d4b520306a555bad452484aa294d7bea05d58b684f7033149c7199bcedb308"} Nov 26 08:59:19 crc kubenswrapper[4940]: I1126 08:59:19.177388 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:59:19 crc kubenswrapper[4940]: E1126 08:59:19.177713 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:59:20 crc kubenswrapper[4940]: I1126 08:59:20.948171 4940 generic.go:334] "Generic (PLEG): container finished" podID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerID="dc20f57edcfa44244ea6d88fdaf1d7054e4d108a01a19d7638c4e7761ad7552b" exitCode=0 Nov 26 08:59:20 crc kubenswrapper[4940]: I1126 08:59:20.948687 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pndmx" event={"ID":"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8","Type":"ContainerDied","Data":"dc20f57edcfa44244ea6d88fdaf1d7054e4d108a01a19d7638c4e7761ad7552b"} Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.006835 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zd6rc"] Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.009455 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.027802 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zd6rc"] Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.128202 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-utilities\") pod \"redhat-operators-zd6rc\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.129591 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-catalog-content\") pod \"redhat-operators-zd6rc\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.129714 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbfwv\" (UniqueName: \"kubernetes.io/projected/babf853d-ca8b-464a-97cf-70c9a6c4fed4-kube-api-access-hbfwv\") pod \"redhat-operators-zd6rc\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.232661 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-catalog-content\") pod \"redhat-operators-zd6rc\" (UID: 
\"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.232763 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbfwv\" (UniqueName: \"kubernetes.io/projected/babf853d-ca8b-464a-97cf-70c9a6c4fed4-kube-api-access-hbfwv\") pod \"redhat-operators-zd6rc\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.232930 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-utilities\") pod \"redhat-operators-zd6rc\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.234576 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-utilities\") pod \"redhat-operators-zd6rc\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.234690 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-catalog-content\") pod \"redhat-operators-zd6rc\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.253169 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbfwv\" (UniqueName: \"kubernetes.io/projected/babf853d-ca8b-464a-97cf-70c9a6c4fed4-kube-api-access-hbfwv\") pod \"redhat-operators-zd6rc\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:22 crc kubenswrapper[4940]: I1126 08:59:22.346389 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:25 crc kubenswrapper[4940]: I1126 08:59:25.017137 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zd6rc"] Nov 26 08:59:25 crc kubenswrapper[4940]: I1126 08:59:25.017978 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pndmx" event={"ID":"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8","Type":"ContainerStarted","Data":"30d251208e34e27a80b39feec2b8a327dd66c22558fac128f49161103d8de0c0"} Nov 26 08:59:25 crc kubenswrapper[4940]: I1126 08:59:25.048226 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pndmx" podStartSLOduration=2.274061657 podStartE2EDuration="8.048207215s" podCreationTimestamp="2025-11-26 08:59:17 +0000 UTC" firstStartedPulling="2025-11-26 08:59:18.913194089 +0000 UTC m=+7460.433335708" lastFinishedPulling="2025-11-26 08:59:24.687339647 +0000 UTC m=+7466.207481266" observedRunningTime="2025-11-26 08:59:25.0352702 +0000 UTC m=+7466.555411819" watchObservedRunningTime="2025-11-26 08:59:25.048207215 +0000 UTC m=+7466.568348834" Nov 26 08:59:26 crc kubenswrapper[4940]: I1126 08:59:26.031597 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-ddsss" event={"ID":"f9669e1c-3fe0-49e5-bf17-012bf16e9944","Type":"ContainerStarted","Data":"7156b95c49d84fa1b4c6984d73e01fb28a808dd8197d59bbd97be99d5590b5cb"} Nov 26 08:59:26 crc kubenswrapper[4940]: I1126 08:59:26.037512 4940 generic.go:334] "Generic (PLEG): container finished" podID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerID="89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561" exitCode=0 Nov 26 08:59:26 crc kubenswrapper[4940]: I1126 08:59:26.038357 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zd6rc" event={"ID":"babf853d-ca8b-464a-97cf-70c9a6c4fed4","Type":"ContainerDied","Data":"89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561"} Nov 26 08:59:26 crc kubenswrapper[4940]: I1126 08:59:26.038392 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zd6rc" event={"ID":"babf853d-ca8b-464a-97cf-70c9a6c4fed4","Type":"ContainerStarted","Data":"f9f3f65fafecdae255c7a96cb4e80e12cab72d8e37d257877d31acec5ab778e2"} Nov 26 08:59:26 crc kubenswrapper[4940]: I1126 08:59:26.087095 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-ddsss" podStartSLOduration=3.081611811 podStartE2EDuration="9.087076084s" podCreationTimestamp="2025-11-26 08:59:17 +0000 UTC" firstStartedPulling="2025-11-26 08:59:18.683209787 +0000 UTC m=+7460.203351406" lastFinishedPulling="2025-11-26 08:59:24.68867406 +0000 UTC m=+7466.208815679" observedRunningTime="2025-11-26 08:59:26.056627706 +0000 UTC m=+7467.576769325" watchObservedRunningTime="2025-11-26 08:59:26.087076084 +0000 UTC m=+7467.607217703" Nov 26 08:59:27 crc kubenswrapper[4940]: I1126 08:59:27.050021 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zd6rc" event={"ID":"babf853d-ca8b-464a-97cf-70c9a6c4fed4","Type":"ContainerStarted","Data":"6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1"} Nov 26 08:59:27 crc kubenswrapper[4940]: I1126 08:59:27.936176 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:27 
crc kubenswrapper[4940]: I1126 08:59:27.936529 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:28 crc kubenswrapper[4940]: I1126 08:59:28.031654 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 08:59:28 crc kubenswrapper[4940]: I1126 08:59:28.079091 4940 generic.go:334] "Generic (PLEG): container finished" podID="f9669e1c-3fe0-49e5-bf17-012bf16e9944" containerID="7156b95c49d84fa1b4c6984d73e01fb28a808dd8197d59bbd97be99d5590b5cb" exitCode=0 Nov 26 08:59:28 crc kubenswrapper[4940]: I1126 08:59:28.079184 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-ddsss" event={"ID":"f9669e1c-3fe0-49e5-bf17-012bf16e9944","Type":"ContainerDied","Data":"7156b95c49d84fa1b4c6984d73e01fb28a808dd8197d59bbd97be99d5590b5cb"} Nov 26 08:59:28 crc kubenswrapper[4940]: I1126 08:59:28.989125 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-pndmx" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="registry-server" probeResult="failure" output=< Nov 26 08:59:28 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 08:59:28 crc kubenswrapper[4940]: > Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.606802 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.702627 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-job-config-data\") pod \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.702713 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-config-data\") pod \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.703195 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnppm\" (UniqueName: \"kubernetes.io/projected/f9669e1c-3fe0-49e5-bf17-012bf16e9944-kube-api-access-gnppm\") pod \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.703358 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-combined-ca-bundle\") pod \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\" (UID: \"f9669e1c-3fe0-49e5-bf17-012bf16e9944\") " Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.707850 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "f9669e1c-3fe0-49e5-bf17-012bf16e9944" (UID: "f9669e1c-3fe0-49e5-bf17-012bf16e9944"). InnerVolumeSpecName "job-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.711349 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-config-data" (OuterVolumeSpecName: "config-data") pod "f9669e1c-3fe0-49e5-bf17-012bf16e9944" (UID: "f9669e1c-3fe0-49e5-bf17-012bf16e9944"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.714187 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9669e1c-3fe0-49e5-bf17-012bf16e9944-kube-api-access-gnppm" (OuterVolumeSpecName: "kube-api-access-gnppm") pod "f9669e1c-3fe0-49e5-bf17-012bf16e9944" (UID: "f9669e1c-3fe0-49e5-bf17-012bf16e9944"). InnerVolumeSpecName "kube-api-access-gnppm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.734269 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f9669e1c-3fe0-49e5-bf17-012bf16e9944" (UID: "f9669e1c-3fe0-49e5-bf17-012bf16e9944"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.805371 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnppm\" (UniqueName: \"kubernetes.io/projected/f9669e1c-3fe0-49e5-bf17-012bf16e9944-kube-api-access-gnppm\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.805410 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.805422 4940 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:29 crc kubenswrapper[4940]: I1126 08:59:29.805437 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9669e1c-3fe0-49e5-bf17-012bf16e9944-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.102686 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-ddsss" event={"ID":"f9669e1c-3fe0-49e5-bf17-012bf16e9944","Type":"ContainerDied","Data":"d2d4b520306a555bad452484aa294d7bea05d58b684f7033149c7199bcedb308"} Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.102750 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2d4b520306a555bad452484aa294d7bea05d58b684f7033149c7199bcedb308" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.102705 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-ddsss" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.104533 4940 generic.go:334] "Generic (PLEG): container finished" podID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerID="6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1" exitCode=0 Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.104569 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zd6rc" event={"ID":"babf853d-ca8b-464a-97cf-70c9a6c4fed4","Type":"ContainerDied","Data":"6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1"} Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.388621 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 26 08:59:30 crc kubenswrapper[4940]: E1126 08:59:30.389751 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9669e1c-3fe0-49e5-bf17-012bf16e9944" containerName="manila-db-sync" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.389786 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9669e1c-3fe0-49e5-bf17-012bf16e9944" containerName="manila-db-sync" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.390004 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9669e1c-3fe0-49e5-bf17-012bf16e9944" containerName="manila-db-sync" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.397750 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.402565 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.402826 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.403015 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.403164 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-d7qjg" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.425881 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-scripts\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.425951 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.425980 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.426002 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-config-data\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.426093 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nnnn\" (UniqueName: \"kubernetes.io/projected/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-kube-api-access-9nnnn\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.426131 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.426316 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.428367 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.442265 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.455196 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.487371 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84bd68d965-jc846"] Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.490321 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.513129 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bd68d965-jc846"] Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530204 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-config-data\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530258 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530289 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-scripts\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530335 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-scripts\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530357 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftkmk\" (UniqueName: \"kubernetes.io/projected/75175e73-2a8a-428f-b0e2-b1f037ab52df-kube-api-access-ftkmk\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530382 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmdw7\" (UniqueName: \"kubernetes.io/projected/c395967c-afac-4a37-9c88-715f6297c9ee-kube-api-access-mmdw7\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530408 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-nb\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530433 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-dns-svc\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530461 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530477 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c395967c-afac-4a37-9c88-715f6297c9ee-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530503 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530523 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-config\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530560 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/c395967c-afac-4a37-9c88-715f6297c9ee-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530577 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-config-data\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530611 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-sb\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530671 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nnnn\" (UniqueName: \"kubernetes.io/projected/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-kube-api-access-9nnnn\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530706 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c395967c-afac-4a37-9c88-715f6297c9ee-ceph\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530728 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-config-data-custom\") pod 
\"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.530748 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.534780 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-scripts\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.534867 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.542302 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.550392 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-config-data\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.552305 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.553489 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.559244 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nnnn\" (UniqueName: \"kubernetes.io/projected/f4ad7574-4285-4af3-9ee6-ab07aa65f83f-kube-api-access-9nnnn\") pod \"manila-scheduler-0\" (UID: \"f4ad7574-4285-4af3-9ee6-ab07aa65f83f\") " pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.615824 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.617502 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.621472 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632394 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c395967c-afac-4a37-9c88-715f6297c9ee-ceph\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632448 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632490 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-config-data\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632519 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632543 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-scripts\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632563 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftkmk\" (UniqueName: \"kubernetes.io/projected/75175e73-2a8a-428f-b0e2-b1f037ab52df-kube-api-access-ftkmk\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632586 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmdw7\" (UniqueName: \"kubernetes.io/projected/c395967c-afac-4a37-9c88-715f6297c9ee-kube-api-access-mmdw7\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632610 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-nb\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632632 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-dns-svc\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: 
\"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632662 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c395967c-afac-4a37-9c88-715f6297c9ee-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632689 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/c395967c-afac-4a37-9c88-715f6297c9ee-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632707 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-config\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.632753 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-sb\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.633639 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-sb\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.634765 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-dns-svc\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.636534 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-nb\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.637538 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.637635 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.637681 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c395967c-afac-4a37-9c88-715f6297c9ee-etc-machine-id\") pod \"manila-share-share1-0\" (UID: 
\"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.638340 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-config\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.644369 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-scripts\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.648532 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/c395967c-afac-4a37-9c88-715f6297c9ee-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.658096 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.658615 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c395967c-afac-4a37-9c88-715f6297c9ee-config-data\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.664372 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c395967c-afac-4a37-9c88-715f6297c9ee-ceph\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.676054 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmdw7\" (UniqueName: \"kubernetes.io/projected/c395967c-afac-4a37-9c88-715f6297c9ee-kube-api-access-mmdw7\") pod \"manila-share-share1-0\" (UID: \"c395967c-afac-4a37-9c88-715f6297c9ee\") " pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.676572 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftkmk\" (UniqueName: \"kubernetes.io/projected/75175e73-2a8a-428f-b0e2-b1f037ab52df-kube-api-access-ftkmk\") pod \"dnsmasq-dns-84bd68d965-jc846\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.729382 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.748735 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66p54\" (UniqueName: \"kubernetes.io/projected/0556829d-ebd6-4c87-8d39-dfecb84851d1-kube-api-access-66p54\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.748935 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-scripts\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.748981 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-config-data\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.749025 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-config-data-custom\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.749057 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0556829d-ebd6-4c87-8d39-dfecb84851d1-logs\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.749080 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0556829d-ebd6-4c87-8d39-dfecb84851d1-etc-machine-id\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.749159 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.754664 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.811507 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.850450 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-scripts\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.850493 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-config-data\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.850538 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-config-data-custom\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.850559 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0556829d-ebd6-4c87-8d39-dfecb84851d1-logs\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.850583 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0556829d-ebd6-4c87-8d39-dfecb84851d1-etc-machine-id\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.850644 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.850672 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66p54\" (UniqueName: \"kubernetes.io/projected/0556829d-ebd6-4c87-8d39-dfecb84851d1-kube-api-access-66p54\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.851968 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0556829d-ebd6-4c87-8d39-dfecb84851d1-etc-machine-id\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.852974 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0556829d-ebd6-4c87-8d39-dfecb84851d1-logs\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.858110 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-config-data\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " 
pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.863397 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-scripts\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.865315 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-config-data-custom\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.865386 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0556829d-ebd6-4c87-8d39-dfecb84851d1-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:30 crc kubenswrapper[4940]: I1126 08:59:30.876570 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66p54\" (UniqueName: \"kubernetes.io/projected/0556829d-ebd6-4c87-8d39-dfecb84851d1-kube-api-access-66p54\") pod \"manila-api-0\" (UID: \"0556829d-ebd6-4c87-8d39-dfecb84851d1\") " pod="openstack/manila-api-0" Nov 26 08:59:31 crc kubenswrapper[4940]: I1126 08:59:31.068166 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 26 08:59:31 crc kubenswrapper[4940]: I1126 08:59:31.167622 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:59:31 crc kubenswrapper[4940]: E1126 08:59:31.167879 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:59:31 crc kubenswrapper[4940]: I1126 08:59:31.431122 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 26 08:59:31 crc kubenswrapper[4940]: I1126 08:59:31.468380 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 26 08:59:31 crc kubenswrapper[4940]: W1126 08:59:31.470968 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4ad7574_4285_4af3_9ee6_ab07aa65f83f.slice/crio-b6c2c261a2f77b98fd9642528b32b1936ff6b999057071db63ce6a4ccc632410 WatchSource:0}: Error finding container b6c2c261a2f77b98fd9642528b32b1936ff6b999057071db63ce6a4ccc632410: Status 404 returned error can't find the container with id b6c2c261a2f77b98fd9642528b32b1936ff6b999057071db63ce6a4ccc632410 Nov 26 08:59:31 crc kubenswrapper[4940]: I1126 08:59:31.590178 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bd68d965-jc846"] Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.128320 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zd6rc" 
event={"ID":"babf853d-ca8b-464a-97cf-70c9a6c4fed4","Type":"ContainerStarted","Data":"20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b"} Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.134944 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"c395967c-afac-4a37-9c88-715f6297c9ee","Type":"ContainerStarted","Data":"a5f1e147a4dd2bc99f57f047b9e11f07c4cadfc88d77fc261efba0b5348045f8"} Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.134982 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"f4ad7574-4285-4af3-9ee6-ab07aa65f83f","Type":"ContainerStarted","Data":"b6c2c261a2f77b98fd9642528b32b1936ff6b999057071db63ce6a4ccc632410"} Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.137734 4940 generic.go:334] "Generic (PLEG): container finished" podID="75175e73-2a8a-428f-b0e2-b1f037ab52df" containerID="3c68fc0a48babfaa78386513355559ba4b26efc000bc77d55e8c39e24fdf54d5" exitCode=0 Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.137784 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd68d965-jc846" event={"ID":"75175e73-2a8a-428f-b0e2-b1f037ab52df","Type":"ContainerDied","Data":"3c68fc0a48babfaa78386513355559ba4b26efc000bc77d55e8c39e24fdf54d5"} Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.137809 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd68d965-jc846" event={"ID":"75175e73-2a8a-428f-b0e2-b1f037ab52df","Type":"ContainerStarted","Data":"e1065f5e38e3d9b05f784149c2513f1908106593249b90038170710230d660cb"} Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.209744 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zd6rc" podStartSLOduration=5.659175151 podStartE2EDuration="11.209724933s" podCreationTimestamp="2025-11-26 08:59:21 +0000 UTC" firstStartedPulling="2025-11-26 08:59:26.04090298 +0000 UTC m=+7467.561044599" lastFinishedPulling="2025-11-26 08:59:31.591452752 +0000 UTC m=+7473.111594381" observedRunningTime="2025-11-26 08:59:32.170547034 +0000 UTC m=+7473.690688653" watchObservedRunningTime="2025-11-26 08:59:32.209724933 +0000 UTC m=+7473.729866562" Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.231615 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.346569 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:32 crc kubenswrapper[4940]: I1126 08:59:32.346620 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.150852 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"f4ad7574-4285-4af3-9ee6-ab07aa65f83f","Type":"ContainerStarted","Data":"6dfa64831ac5822ef855cdbfd32ea19bff4484c46581f38f64a408b027734a68"} Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.152158 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"f4ad7574-4285-4af3-9ee6-ab07aa65f83f","Type":"ContainerStarted","Data":"7b3eac5ccee992d7cbce853568d9bc97dce6523bd28c7531bcf41c99332a11b1"} Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.156390 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-84bd68d965-jc846" event={"ID":"75175e73-2a8a-428f-b0e2-b1f037ab52df","Type":"ContainerStarted","Data":"071d42c22df1b31ff91552e1f8807a36130ada344790ae7e9c1385fe59965fed"} Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.156605 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.178963 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=2.807130043 podStartE2EDuration="3.178939463s" podCreationTimestamp="2025-11-26 08:59:30 +0000 UTC" firstStartedPulling="2025-11-26 08:59:31.58672063 +0000 UTC m=+7473.106862249" lastFinishedPulling="2025-11-26 08:59:31.95853005 +0000 UTC m=+7473.478671669" observedRunningTime="2025-11-26 08:59:33.16547943 +0000 UTC m=+7474.685621049" watchObservedRunningTime="2025-11-26 08:59:33.178939463 +0000 UTC m=+7474.699081082" Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.191899 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"0556829d-ebd6-4c87-8d39-dfecb84851d1","Type":"ContainerStarted","Data":"a314195f861344af9086bcccc423d7b23f2a5d917f739c024cfd5e535acf43f9"} Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.191941 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"0556829d-ebd6-4c87-8d39-dfecb84851d1","Type":"ContainerStarted","Data":"366d4abe0325daff91462cf6335563edd5b9ba47ec8180d9c7d4f77414f47422"} Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.193859 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84bd68d965-jc846" podStartSLOduration=3.193844012 podStartE2EDuration="3.193844012s" podCreationTimestamp="2025-11-26 08:59:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:59:33.182958472 +0000 UTC m=+7474.703100091" watchObservedRunningTime="2025-11-26 08:59:33.193844012 +0000 UTC m=+7474.713985621" Nov 26 08:59:33 crc kubenswrapper[4940]: I1126 08:59:33.464848 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zd6rc" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="registry-server" probeResult="failure" output=< Nov 26 08:59:33 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 08:59:33 crc kubenswrapper[4940]: > Nov 26 08:59:34 crc kubenswrapper[4940]: I1126 08:59:34.192089 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"0556829d-ebd6-4c87-8d39-dfecb84851d1","Type":"ContainerStarted","Data":"591151ccf2fb95cfc5c03ba1f6ce6da688440605885a475560b4273296271ab8"} Nov 26 08:59:34 crc kubenswrapper[4940]: I1126 08:59:34.238859 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=4.238840208 podStartE2EDuration="4.238840208s" podCreationTimestamp="2025-11-26 08:59:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:59:34.228428113 +0000 UTC m=+7475.748569802" watchObservedRunningTime="2025-11-26 08:59:34.238840208 +0000 UTC m=+7475.758981817" Nov 26 08:59:34 crc kubenswrapper[4940]: I1126 08:59:34.719452 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ceilometer-0"] Nov 26 08:59:34 crc kubenswrapper[4940]: I1126 08:59:34.719793 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="ceilometer-central-agent" containerID="cri-o://25da152aad697c9acf066a165ee837505c27424621513e0784ac56a05f7fff52" gracePeriod=30 Nov 26 08:59:34 crc kubenswrapper[4940]: I1126 08:59:34.719826 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="proxy-httpd" containerID="cri-o://3753b63fd2840eac999c436c7e8d5a13044b933eb1e06494d6ee45f4caad1475" gracePeriod=30 Nov 26 08:59:34 crc kubenswrapper[4940]: I1126 08:59:34.719936 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="sg-core" containerID="cri-o://dd9e8c6230bd914b78ae2d1a2f6186755df23f3ec52c13ab2e8ca8453b94a992" gracePeriod=30 Nov 26 08:59:34 crc kubenswrapper[4940]: I1126 08:59:34.720003 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="ceilometer-notification-agent" containerID="cri-o://8c0f9554e6ab17408a1f1c872b29999fbf74f97a19aa9b5aae157994b35bcff7" gracePeriod=30 Nov 26 08:59:35 crc kubenswrapper[4940]: I1126 08:59:35.218567 4940 generic.go:334] "Generic (PLEG): container finished" podID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerID="3753b63fd2840eac999c436c7e8d5a13044b933eb1e06494d6ee45f4caad1475" exitCode=0 Nov 26 08:59:35 crc kubenswrapper[4940]: I1126 08:59:35.218820 4940 generic.go:334] "Generic (PLEG): container finished" podID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerID="dd9e8c6230bd914b78ae2d1a2f6186755df23f3ec52c13ab2e8ca8453b94a992" exitCode=2 Nov 26 08:59:35 crc kubenswrapper[4940]: I1126 08:59:35.218831 4940 generic.go:334] "Generic (PLEG): container finished" podID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerID="25da152aad697c9acf066a165ee837505c27424621513e0784ac56a05f7fff52" exitCode=0 Nov 26 08:59:35 crc kubenswrapper[4940]: I1126 08:59:35.218645 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerDied","Data":"3753b63fd2840eac999c436c7e8d5a13044b933eb1e06494d6ee45f4caad1475"} Nov 26 08:59:35 crc kubenswrapper[4940]: I1126 08:59:35.218893 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerDied","Data":"dd9e8c6230bd914b78ae2d1a2f6186755df23f3ec52c13ab2e8ca8453b94a992"} Nov 26 08:59:35 crc kubenswrapper[4940]: I1126 08:59:35.218915 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerDied","Data":"25da152aad697c9acf066a165ee837505c27424621513e0784ac56a05f7fff52"} Nov 26 08:59:35 crc kubenswrapper[4940]: I1126 08:59:35.219023 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 26 08:59:37 crc kubenswrapper[4940]: I1126 08:59:37.997893 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:38 crc kubenswrapper[4940]: I1126 08:59:38.069858 4940 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:38 crc kubenswrapper[4940]: I1126 08:59:38.241854 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pndmx"] Nov 26 08:59:38 crc kubenswrapper[4940]: I1126 08:59:38.261197 4940 generic.go:334] "Generic (PLEG): container finished" podID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerID="8c0f9554e6ab17408a1f1c872b29999fbf74f97a19aa9b5aae157994b35bcff7" exitCode=0 Nov 26 08:59:38 crc kubenswrapper[4940]: I1126 08:59:38.261697 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerDied","Data":"8c0f9554e6ab17408a1f1c872b29999fbf74f97a19aa9b5aae157994b35bcff7"} Nov 26 08:59:39 crc kubenswrapper[4940]: I1126 08:59:39.270792 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pndmx" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="registry-server" containerID="cri-o://30d251208e34e27a80b39feec2b8a327dd66c22558fac128f49161103d8de0c0" gracePeriod=2 Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.283235 4940 generic.go:334] "Generic (PLEG): container finished" podID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerID="30d251208e34e27a80b39feec2b8a327dd66c22558fac128f49161103d8de0c0" exitCode=0 Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.283244 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pndmx" event={"ID":"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8","Type":"ContainerDied","Data":"30d251208e34e27a80b39feec2b8a327dd66c22558fac128f49161103d8de0c0"} Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.731308 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.769497 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.814832 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.841715 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-scripts\") pod \"fb9106b6-499d-42d5-a7ac-7ab522135d91\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.841819 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-log-httpd\") pod \"fb9106b6-499d-42d5-a7ac-7ab522135d91\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.841910 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9pz9\" (UniqueName: \"kubernetes.io/projected/fb9106b6-499d-42d5-a7ac-7ab522135d91-kube-api-access-p9pz9\") pod \"fb9106b6-499d-42d5-a7ac-7ab522135d91\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.842006 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-combined-ca-bundle\") pod \"fb9106b6-499d-42d5-a7ac-7ab522135d91\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.842068 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-config-data\") pod \"fb9106b6-499d-42d5-a7ac-7ab522135d91\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.842099 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-run-httpd\") pod \"fb9106b6-499d-42d5-a7ac-7ab522135d91\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.842116 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-sg-core-conf-yaml\") pod \"fb9106b6-499d-42d5-a7ac-7ab522135d91\" (UID: \"fb9106b6-499d-42d5-a7ac-7ab522135d91\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.843902 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fb9106b6-499d-42d5-a7ac-7ab522135d91" (UID: "fb9106b6-499d-42d5-a7ac-7ab522135d91"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.844190 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fb9106b6-499d-42d5-a7ac-7ab522135d91" (UID: "fb9106b6-499d-42d5-a7ac-7ab522135d91"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.846276 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.846301 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb9106b6-499d-42d5-a7ac-7ab522135d91-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.853556 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb9106b6-499d-42d5-a7ac-7ab522135d91-kube-api-access-p9pz9" (OuterVolumeSpecName: "kube-api-access-p9pz9") pod "fb9106b6-499d-42d5-a7ac-7ab522135d91" (UID: "fb9106b6-499d-42d5-a7ac-7ab522135d91"). InnerVolumeSpecName "kube-api-access-p9pz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.854929 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-scripts" (OuterVolumeSpecName: "scripts") pod "fb9106b6-499d-42d5-a7ac-7ab522135d91" (UID: "fb9106b6-499d-42d5-a7ac-7ab522135d91"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.859706 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.907805 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8dd447d7-6lp69"] Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.908757 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" podUID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" containerName="dnsmasq-dns" containerID="cri-o://7f0ecccbce780f2ce5f44411070e4ccdad15455ae896bf8a222cd994dcbecf7a" gracePeriod=10 Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.947461 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-utilities\") pod \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.947527 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5jzk\" (UniqueName: \"kubernetes.io/projected/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-kube-api-access-r5jzk\") pod \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.948153 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-catalog-content\") pod \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\" (UID: \"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8\") " Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.950142 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9pz9\" (UniqueName: \"kubernetes.io/projected/fb9106b6-499d-42d5-a7ac-7ab522135d91-kube-api-access-p9pz9\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:40 crc 
kubenswrapper[4940]: I1126 08:59:40.950194 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.950798 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-utilities" (OuterVolumeSpecName: "utilities") pod "ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" (UID: "ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:59:40 crc kubenswrapper[4940]: I1126 08:59:40.964757 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-kube-api-access-r5jzk" (OuterVolumeSpecName: "kube-api-access-r5jzk") pod "ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" (UID: "ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8"). InnerVolumeSpecName "kube-api-access-r5jzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.003930 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" (UID: "ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.052807 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.052843 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5jzk\" (UniqueName: \"kubernetes.io/projected/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-kube-api-access-r5jzk\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.052857 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.073592 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fb9106b6-499d-42d5-a7ac-7ab522135d91" (UID: "fb9106b6-499d-42d5-a7ac-7ab522135d91"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.110608 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb9106b6-499d-42d5-a7ac-7ab522135d91" (UID: "fb9106b6-499d-42d5-a7ac-7ab522135d91"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.138096 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-config-data" (OuterVolumeSpecName: "config-data") pod "fb9106b6-499d-42d5-a7ac-7ab522135d91" (UID: "fb9106b6-499d-42d5-a7ac-7ab522135d91"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.154391 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.154432 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.154443 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9106b6-499d-42d5-a7ac-7ab522135d91-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.310033 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pndmx" event={"ID":"ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8","Type":"ContainerDied","Data":"71611586fe40c7d1b7e417a663cb334a9100c202c7114509e894cf8a5a4b6f7c"} Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.310127 4940 scope.go:117] "RemoveContainer" containerID="30d251208e34e27a80b39feec2b8a327dd66c22558fac128f49161103d8de0c0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.310565 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pndmx" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.324485 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"c395967c-afac-4a37-9c88-715f6297c9ee","Type":"ContainerStarted","Data":"8f78ad2a14adc40f7ad019812441f491e167a47d35b424f82f74c53e94dccc2f"} Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.371155 4940 generic.go:334] "Generic (PLEG): container finished" podID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" containerID="7f0ecccbce780f2ce5f44411070e4ccdad15455ae896bf8a222cd994dcbecf7a" exitCode=0 Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.371264 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" event={"ID":"26c48474-6fec-46fb-8dd8-64d5c941f5cc","Type":"ContainerDied","Data":"7f0ecccbce780f2ce5f44411070e4ccdad15455ae896bf8a222cd994dcbecf7a"} Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.373478 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pndmx"] Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.391357 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fb9106b6-499d-42d5-a7ac-7ab522135d91","Type":"ContainerDied","Data":"0d80df9f7cf780908c025bd0160142be3da8772349adaff82afeda07c2fd1528"} Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.391603 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.479334 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pndmx"] Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.502664 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.506080 4940 scope.go:117] "RemoveContainer" containerID="dc20f57edcfa44244ea6d88fdaf1d7054e4d108a01a19d7638c4e7761ad7552b" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.521608 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:41 crc kubenswrapper[4940]: E1126 08:59:41.540836 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb9106b6_499d_42d5_a7ac_7ab522135d91.slice/crio-0d80df9f7cf780908c025bd0160142be3da8772349adaff82afeda07c2fd1528\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb9106b6_499d_42d5_a7ac_7ab522135d91.slice\": RecentStats: unable to find data in memory cache]" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.563553 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:41 crc kubenswrapper[4940]: E1126 08:59:41.563991 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="extract-utilities" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564008 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="extract-utilities" Nov 26 08:59:41 crc kubenswrapper[4940]: E1126 08:59:41.564028 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="ceilometer-central-agent" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564048 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="ceilometer-central-agent" Nov 26 08:59:41 crc kubenswrapper[4940]: E1126 08:59:41.564060 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="proxy-httpd" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564067 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="proxy-httpd" Nov 26 08:59:41 crc kubenswrapper[4940]: E1126 08:59:41.564087 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="sg-core" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564093 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="sg-core" Nov 26 08:59:41 crc kubenswrapper[4940]: E1126 08:59:41.564116 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="extract-content" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564122 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="extract-content" Nov 26 08:59:41 crc kubenswrapper[4940]: E1126 08:59:41.564135 4940 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="registry-server" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564141 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="registry-server" Nov 26 08:59:41 crc kubenswrapper[4940]: E1126 08:59:41.564165 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="ceilometer-notification-agent" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564171 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="ceilometer-notification-agent" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564348 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="proxy-httpd" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564369 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="ceilometer-central-agent" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564378 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" containerName="registry-server" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564390 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="sg-core" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.564402 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" containerName="ceilometer-notification-agent" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.566232 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.568432 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.568617 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.573807 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.640966 4940 scope.go:117] "RemoveContainer" containerID="9482552e818f547975dfcc8fabcd71772e698acf55cfb7039606bb4ddbf66876" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.653559 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.699053 4940 scope.go:117] "RemoveContainer" containerID="3753b63fd2840eac999c436c7e8d5a13044b933eb1e06494d6ee45f4caad1475" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.712908 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-run-httpd\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.712951 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt64g\" (UniqueName: \"kubernetes.io/projected/f5f39468-6914-414b-9c2b-6a7c0189270d-kube-api-access-nt64g\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.713092 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-log-httpd\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.713476 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-config-data\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.713566 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.713626 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-scripts\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.713655 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.740433 4940 scope.go:117] "RemoveContainer" containerID="dd9e8c6230bd914b78ae2d1a2f6186755df23f3ec52c13ab2e8ca8453b94a992" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.763130 4940 scope.go:117] "RemoveContainer" containerID="8c0f9554e6ab17408a1f1c872b29999fbf74f97a19aa9b5aae157994b35bcff7" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.787103 4940 scope.go:117] "RemoveContainer" containerID="25da152aad697c9acf066a165ee837505c27424621513e0784ac56a05f7fff52" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.822142 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-nb\") pod \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.822750 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-config\") pod \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.822913 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2khsh\" (UniqueName: \"kubernetes.io/projected/26c48474-6fec-46fb-8dd8-64d5c941f5cc-kube-api-access-2khsh\") pod \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.822983 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-sb\") pod \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.823275 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-dns-svc\") pod \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\" (UID: \"26c48474-6fec-46fb-8dd8-64d5c941f5cc\") " Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.823849 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-run-httpd\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.823889 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt64g\" (UniqueName: \"kubernetes.io/projected/f5f39468-6914-414b-9c2b-6a7c0189270d-kube-api-access-nt64g\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.824291 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-log-httpd\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.824530 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-log-httpd\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.824329 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-run-httpd\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.824792 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-config-data\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.824894 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.824942 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-scripts\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.824965 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.827766 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26c48474-6fec-46fb-8dd8-64d5c941f5cc-kube-api-access-2khsh" (OuterVolumeSpecName: "kube-api-access-2khsh") pod "26c48474-6fec-46fb-8dd8-64d5c941f5cc" (UID: "26c48474-6fec-46fb-8dd8-64d5c941f5cc"). InnerVolumeSpecName "kube-api-access-2khsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.828818 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.831591 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-config-data\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.833524 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.848888 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-scripts\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.852895 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt64g\" (UniqueName: \"kubernetes.io/projected/f5f39468-6914-414b-9c2b-6a7c0189270d-kube-api-access-nt64g\") pod \"ceilometer-0\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " pod="openstack/ceilometer-0" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.884691 4940 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-config" (OuterVolumeSpecName: "config") pod "26c48474-6fec-46fb-8dd8-64d5c941f5cc" (UID: "26c48474-6fec-46fb-8dd8-64d5c941f5cc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.887586 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "26c48474-6fec-46fb-8dd8-64d5c941f5cc" (UID: "26c48474-6fec-46fb-8dd8-64d5c941f5cc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.898694 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "26c48474-6fec-46fb-8dd8-64d5c941f5cc" (UID: "26c48474-6fec-46fb-8dd8-64d5c941f5cc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.901847 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "26c48474-6fec-46fb-8dd8-64d5c941f5cc" (UID: "26c48474-6fec-46fb-8dd8-64d5c941f5cc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.927364 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.927399 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.927411 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.927420 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2khsh\" (UniqueName: \"kubernetes.io/projected/26c48474-6fec-46fb-8dd8-64d5c941f5cc-kube-api-access-2khsh\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:41 crc kubenswrapper[4940]: I1126 08:59:41.927428 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/26c48474-6fec-46fb-8dd8-64d5c941f5cc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.027611 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.411722 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"c395967c-afac-4a37-9c88-715f6297c9ee","Type":"ContainerStarted","Data":"e20535be39660f8c7d65e2825335d5e5a4b7da53bb10ea10f10fe95bcd12df5c"} Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.415297 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" event={"ID":"26c48474-6fec-46fb-8dd8-64d5c941f5cc","Type":"ContainerDied","Data":"3910b400caf914495146972c200e07df8a20f6e0d0dfc94b660f6c0da4f8c303"} Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.415357 4940 scope.go:117] "RemoveContainer" containerID="7f0ecccbce780f2ce5f44411070e4ccdad15455ae896bf8a222cd994dcbecf7a" Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.415479 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b8dd447d7-6lp69" Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.460491 4940 scope.go:117] "RemoveContainer" containerID="8b9626e8993254c771599f187cc3b92b4c14f52e6730f9df88b5c8953dd69b7e" Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.470810 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.63904649 podStartE2EDuration="12.470791029s" podCreationTimestamp="2025-11-26 08:59:30 +0000 UTC" firstStartedPulling="2025-11-26 08:59:31.586930846 +0000 UTC m=+7473.107072465" lastFinishedPulling="2025-11-26 08:59:40.418675385 +0000 UTC m=+7481.938817004" observedRunningTime="2025-11-26 08:59:42.449228325 +0000 UTC m=+7483.969369944" watchObservedRunningTime="2025-11-26 08:59:42.470791029 +0000 UTC m=+7483.990932648" Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.493264 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8dd447d7-6lp69"] Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.511926 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b8dd447d7-6lp69"] Nov 26 08:59:42 crc kubenswrapper[4940]: I1126 08:59:42.524156 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:42 crc kubenswrapper[4940]: W1126 08:59:42.531060 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5f39468_6914_414b_9c2b_6a7c0189270d.slice/crio-a7900de362dbf304e82309123826fe8b484d4d0f10d380d934e1d2653d64166c WatchSource:0}: Error finding container a7900de362dbf304e82309123826fe8b484d4d0f10d380d934e1d2653d64166c: Status 404 returned error can't find the container with id a7900de362dbf304e82309123826fe8b484d4d0f10d380d934e1d2653d64166c Nov 26 08:59:43 crc kubenswrapper[4940]: I1126 08:59:43.184582 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" path="/var/lib/kubelet/pods/26c48474-6fec-46fb-8dd8-64d5c941f5cc/volumes" Nov 26 08:59:43 crc kubenswrapper[4940]: I1126 08:59:43.185634 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8" path="/var/lib/kubelet/pods/ab40c62c-dee2-423d-9e5d-d28bf9c8f6c8/volumes" Nov 26 08:59:43 crc kubenswrapper[4940]: I1126 08:59:43.186644 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb9106b6-499d-42d5-a7ac-7ab522135d91" 
path="/var/lib/kubelet/pods/fb9106b6-499d-42d5-a7ac-7ab522135d91/volumes" Nov 26 08:59:43 crc kubenswrapper[4940]: I1126 08:59:43.277723 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:43 crc kubenswrapper[4940]: I1126 08:59:43.423914 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zd6rc" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="registry-server" probeResult="failure" output=< Nov 26 08:59:43 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 08:59:43 crc kubenswrapper[4940]: > Nov 26 08:59:43 crc kubenswrapper[4940]: I1126 08:59:43.463105 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerStarted","Data":"5865d814fa95aa1d757980b07f37ca3ddd748f4e33c236764ba394eaac9f2dca"} Nov 26 08:59:43 crc kubenswrapper[4940]: I1126 08:59:43.463447 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerStarted","Data":"57820e87cb81797850041839f67578b002ba7c390daa4c9b635458b42b0c23c9"} Nov 26 08:59:43 crc kubenswrapper[4940]: I1126 08:59:43.463467 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerStarted","Data":"a7900de362dbf304e82309123826fe8b484d4d0f10d380d934e1d2653d64166c"} Nov 26 08:59:44 crc kubenswrapper[4940]: I1126 08:59:44.166570 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:59:44 crc kubenswrapper[4940]: E1126 08:59:44.167191 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 08:59:44 crc kubenswrapper[4940]: I1126 08:59:44.521802 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerStarted","Data":"da84fccc87551c1a2c144e5151de8297f2e7b40112dd98ea57704d82c4d04ef0"} Nov 26 08:59:46 crc kubenswrapper[4940]: I1126 08:59:46.546351 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerStarted","Data":"2b775a6426fb78b1002f22b6069943b075c3f6daa823ce829a4344e8809b1709"} Nov 26 08:59:46 crc kubenswrapper[4940]: I1126 08:59:46.550271 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 08:59:46 crc kubenswrapper[4940]: I1126 08:59:46.547132 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="proxy-httpd" containerID="cri-o://2b775a6426fb78b1002f22b6069943b075c3f6daa823ce829a4344e8809b1709" gracePeriod=30 Nov 26 08:59:46 crc kubenswrapper[4940]: I1126 08:59:46.546503 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" 
containerName="ceilometer-central-agent" containerID="cri-o://57820e87cb81797850041839f67578b002ba7c390daa4c9b635458b42b0c23c9" gracePeriod=30 Nov 26 08:59:46 crc kubenswrapper[4940]: I1126 08:59:46.547161 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="ceilometer-notification-agent" containerID="cri-o://5865d814fa95aa1d757980b07f37ca3ddd748f4e33c236764ba394eaac9f2dca" gracePeriod=30 Nov 26 08:59:46 crc kubenswrapper[4940]: I1126 08:59:46.547150 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="sg-core" containerID="cri-o://da84fccc87551c1a2c144e5151de8297f2e7b40112dd98ea57704d82c4d04ef0" gracePeriod=30 Nov 26 08:59:46 crc kubenswrapper[4940]: I1126 08:59:46.574992 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.053883459 podStartE2EDuration="5.574973565s" podCreationTimestamp="2025-11-26 08:59:41 +0000 UTC" firstStartedPulling="2025-11-26 08:59:42.533622548 +0000 UTC m=+7484.053764167" lastFinishedPulling="2025-11-26 08:59:46.054712654 +0000 UTC m=+7487.574854273" observedRunningTime="2025-11-26 08:59:46.567510095 +0000 UTC m=+7488.087651724" watchObservedRunningTime="2025-11-26 08:59:46.574973565 +0000 UTC m=+7488.095115184" Nov 26 08:59:47 crc kubenswrapper[4940]: I1126 08:59:47.559103 4940 generic.go:334] "Generic (PLEG): container finished" podID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerID="2b775a6426fb78b1002f22b6069943b075c3f6daa823ce829a4344e8809b1709" exitCode=0 Nov 26 08:59:47 crc kubenswrapper[4940]: I1126 08:59:47.559331 4940 generic.go:334] "Generic (PLEG): container finished" podID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerID="da84fccc87551c1a2c144e5151de8297f2e7b40112dd98ea57704d82c4d04ef0" exitCode=2 Nov 26 08:59:47 crc kubenswrapper[4940]: I1126 08:59:47.559341 4940 generic.go:334] "Generic (PLEG): container finished" podID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerID="5865d814fa95aa1d757980b07f37ca3ddd748f4e33c236764ba394eaac9f2dca" exitCode=0 Nov 26 08:59:47 crc kubenswrapper[4940]: I1126 08:59:47.559181 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerDied","Data":"2b775a6426fb78b1002f22b6069943b075c3f6daa823ce829a4344e8809b1709"} Nov 26 08:59:47 crc kubenswrapper[4940]: I1126 08:59:47.559376 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerDied","Data":"da84fccc87551c1a2c144e5151de8297f2e7b40112dd98ea57704d82c4d04ef0"} Nov 26 08:59:47 crc kubenswrapper[4940]: I1126 08:59:47.559389 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerDied","Data":"5865d814fa95aa1d757980b07f37ca3ddd748f4e33c236764ba394eaac9f2dca"} Nov 26 08:59:48 crc kubenswrapper[4940]: I1126 08:59:48.577568 4940 generic.go:334] "Generic (PLEG): container finished" podID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerID="57820e87cb81797850041839f67578b002ba7c390daa4c9b635458b42b0c23c9" exitCode=0 Nov 26 08:59:48 crc kubenswrapper[4940]: I1126 08:59:48.577786 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerDied","Data":"57820e87cb81797850041839f67578b002ba7c390daa4c9b635458b42b0c23c9"} Nov 26 08:59:48 crc kubenswrapper[4940]: I1126 08:59:48.830705 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.013475 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-combined-ca-bundle\") pod \"f5f39468-6914-414b-9c2b-6a7c0189270d\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.013562 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-run-httpd\") pod \"f5f39468-6914-414b-9c2b-6a7c0189270d\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.013661 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-sg-core-conf-yaml\") pod \"f5f39468-6914-414b-9c2b-6a7c0189270d\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.013691 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt64g\" (UniqueName: \"kubernetes.io/projected/f5f39468-6914-414b-9c2b-6a7c0189270d-kube-api-access-nt64g\") pod \"f5f39468-6914-414b-9c2b-6a7c0189270d\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.013779 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-scripts\") pod \"f5f39468-6914-414b-9c2b-6a7c0189270d\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.013812 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-config-data\") pod \"f5f39468-6914-414b-9c2b-6a7c0189270d\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.013845 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-log-httpd\") pod \"f5f39468-6914-414b-9c2b-6a7c0189270d\" (UID: \"f5f39468-6914-414b-9c2b-6a7c0189270d\") " Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.014360 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f5f39468-6914-414b-9c2b-6a7c0189270d" (UID: "f5f39468-6914-414b-9c2b-6a7c0189270d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.014534 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f5f39468-6914-414b-9c2b-6a7c0189270d" (UID: "f5f39468-6914-414b-9c2b-6a7c0189270d"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.019073 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5f39468-6914-414b-9c2b-6a7c0189270d-kube-api-access-nt64g" (OuterVolumeSpecName: "kube-api-access-nt64g") pod "f5f39468-6914-414b-9c2b-6a7c0189270d" (UID: "f5f39468-6914-414b-9c2b-6a7c0189270d"). InnerVolumeSpecName "kube-api-access-nt64g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.019415 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-scripts" (OuterVolumeSpecName: "scripts") pod "f5f39468-6914-414b-9c2b-6a7c0189270d" (UID: "f5f39468-6914-414b-9c2b-6a7c0189270d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.058625 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f5f39468-6914-414b-9c2b-6a7c0189270d" (UID: "f5f39468-6914-414b-9c2b-6a7c0189270d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.099589 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5f39468-6914-414b-9c2b-6a7c0189270d" (UID: "f5f39468-6914-414b-9c2b-6a7c0189270d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.115833 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nt64g\" (UniqueName: \"kubernetes.io/projected/f5f39468-6914-414b-9c2b-6a7c0189270d-kube-api-access-nt64g\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.115859 4940 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.115870 4940 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.115879 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.115888 4940 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5f39468-6914-414b-9c2b-6a7c0189270d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.115897 4940 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.137708 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-config-data" (OuterVolumeSpecName: "config-data") pod "f5f39468-6914-414b-9c2b-6a7c0189270d" (UID: "f5f39468-6914-414b-9c2b-6a7c0189270d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.219865 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5f39468-6914-414b-9c2b-6a7c0189270d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.590803 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5f39468-6914-414b-9c2b-6a7c0189270d","Type":"ContainerDied","Data":"a7900de362dbf304e82309123826fe8b484d4d0f10d380d934e1d2653d64166c"} Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.590849 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.590873 4940 scope.go:117] "RemoveContainer" containerID="2b775a6426fb78b1002f22b6069943b075c3f6daa823ce829a4344e8809b1709" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.618981 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.626252 4940 scope.go:117] "RemoveContainer" containerID="da84fccc87551c1a2c144e5151de8297f2e7b40112dd98ea57704d82c4d04ef0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.630813 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.642924 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:49 crc kubenswrapper[4940]: E1126 08:59:49.643377 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" containerName="init" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643393 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" containerName="init" Nov 26 08:59:49 crc kubenswrapper[4940]: E1126 08:59:49.643414 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" containerName="dnsmasq-dns" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643421 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" containerName="dnsmasq-dns" Nov 26 08:59:49 crc kubenswrapper[4940]: E1126 08:59:49.643437 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="proxy-httpd" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643443 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="proxy-httpd" Nov 26 08:59:49 crc kubenswrapper[4940]: E1126 08:59:49.643462 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="ceilometer-notification-agent" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643468 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="ceilometer-notification-agent" Nov 26 08:59:49 crc kubenswrapper[4940]: E1126 08:59:49.643476 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="ceilometer-central-agent" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643481 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="ceilometer-central-agent" Nov 26 08:59:49 crc kubenswrapper[4940]: E1126 08:59:49.643512 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="sg-core" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643518 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="sg-core" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643706 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="26c48474-6fec-46fb-8dd8-64d5c941f5cc" containerName="dnsmasq-dns" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643721 4940 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="ceilometer-notification-agent" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643736 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="proxy-httpd" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643751 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="ceilometer-central-agent" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.643765 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" containerName="sg-core" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.645762 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.649181 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.649263 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.655495 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.665036 4940 scope.go:117] "RemoveContainer" containerID="5865d814fa95aa1d757980b07f37ca3ddd748f4e33c236764ba394eaac9f2dca" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.712075 4940 scope.go:117] "RemoveContainer" containerID="57820e87cb81797850041839f67578b002ba7c390daa4c9b635458b42b0c23c9" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.834591 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8421aed1-48bb-4b33-9e07-b887dfda721a-log-httpd\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.834686 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-config-data\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.834713 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-scripts\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.834729 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.834757 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" 
Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.834924 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8421aed1-48bb-4b33-9e07-b887dfda721a-run-httpd\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.835192 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cfr5\" (UniqueName: \"kubernetes.io/projected/8421aed1-48bb-4b33-9e07-b887dfda721a-kube-api-access-7cfr5\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.936654 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-config-data\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.937092 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-scripts\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.937191 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.937314 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.937437 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8421aed1-48bb-4b33-9e07-b887dfda721a-run-httpd\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.937578 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cfr5\" (UniqueName: \"kubernetes.io/projected/8421aed1-48bb-4b33-9e07-b887dfda721a-kube-api-access-7cfr5\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.937735 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8421aed1-48bb-4b33-9e07-b887dfda721a-log-httpd\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.938278 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8421aed1-48bb-4b33-9e07-b887dfda721a-run-httpd\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " 
pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.938301 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8421aed1-48bb-4b33-9e07-b887dfda721a-log-httpd\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.942496 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-scripts\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.942520 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.952422 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.955274 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cfr5\" (UniqueName: \"kubernetes.io/projected/8421aed1-48bb-4b33-9e07-b887dfda721a-kube-api-access-7cfr5\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.958407 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8421aed1-48bb-4b33-9e07-b887dfda721a-config-data\") pod \"ceilometer-0\" (UID: \"8421aed1-48bb-4b33-9e07-b887dfda721a\") " pod="openstack/ceilometer-0" Nov 26 08:59:49 crc kubenswrapper[4940]: I1126 08:59:49.967756 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 08:59:50 crc kubenswrapper[4940]: I1126 08:59:50.435839 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 08:59:50 crc kubenswrapper[4940]: W1126 08:59:50.436790 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8421aed1_48bb_4b33_9e07_b887dfda721a.slice/crio-23c2bb3005d7292cec69026be50d8519955baaacf75260db4812206d7f9d39b1 WatchSource:0}: Error finding container 23c2bb3005d7292cec69026be50d8519955baaacf75260db4812206d7f9d39b1: Status 404 returned error can't find the container with id 23c2bb3005d7292cec69026be50d8519955baaacf75260db4812206d7f9d39b1 Nov 26 08:59:50 crc kubenswrapper[4940]: I1126 08:59:50.604930 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8421aed1-48bb-4b33-9e07-b887dfda721a","Type":"ContainerStarted","Data":"23c2bb3005d7292cec69026be50d8519955baaacf75260db4812206d7f9d39b1"} Nov 26 08:59:50 crc kubenswrapper[4940]: I1126 08:59:50.755562 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 26 08:59:51 crc kubenswrapper[4940]: I1126 08:59:51.180172 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5f39468-6914-414b-9c2b-6a7c0189270d" path="/var/lib/kubelet/pods/f5f39468-6914-414b-9c2b-6a7c0189270d/volumes" Nov 26 08:59:51 crc kubenswrapper[4940]: I1126 08:59:51.621922 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8421aed1-48bb-4b33-9e07-b887dfda721a","Type":"ContainerStarted","Data":"b41deeb8cace159bd46d6ac779ad2f31855b8d52838cb7038f27b6ada61a19c7"} Nov 26 08:59:51 crc kubenswrapper[4940]: I1126 08:59:51.622348 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8421aed1-48bb-4b33-9e07-b887dfda721a","Type":"ContainerStarted","Data":"17c81b64b11fe6e65a3a2671c7092c87b63038290f986e7b251aaaf2604cf451"} Nov 26 08:59:52 crc kubenswrapper[4940]: I1126 08:59:52.404279 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 26 08:59:52 crc kubenswrapper[4940]: I1126 08:59:52.436775 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 26 08:59:52 crc kubenswrapper[4940]: I1126 08:59:52.633577 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8421aed1-48bb-4b33-9e07-b887dfda721a","Type":"ContainerStarted","Data":"5c5a12700a2437345df88c0e5d65ee0e0b9c595e9eeefba8bdae3ca14cadeaeb"} Nov 26 08:59:53 crc kubenswrapper[4940]: I1126 08:59:53.394156 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zd6rc" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="registry-server" probeResult="failure" output=< Nov 26 08:59:53 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 08:59:53 crc kubenswrapper[4940]: > Nov 26 08:59:53 crc kubenswrapper[4940]: I1126 08:59:53.647886 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8421aed1-48bb-4b33-9e07-b887dfda721a","Type":"ContainerStarted","Data":"8e02ba12f1aff0063a7eb56cb563a733e476c4de99f5d5b101ba0758fa38a14a"} Nov 26 08:59:53 crc kubenswrapper[4940]: I1126 08:59:53.648066 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/ceilometer-0" Nov 26 08:59:53 crc kubenswrapper[4940]: I1126 08:59:53.677238 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.759591307 podStartE2EDuration="4.677217338s" podCreationTimestamp="2025-11-26 08:59:49 +0000 UTC" firstStartedPulling="2025-11-26 08:59:50.439311773 +0000 UTC m=+7491.959453392" lastFinishedPulling="2025-11-26 08:59:53.356937804 +0000 UTC m=+7494.877079423" observedRunningTime="2025-11-26 08:59:53.669355226 +0000 UTC m=+7495.189496855" watchObservedRunningTime="2025-11-26 08:59:53.677217338 +0000 UTC m=+7495.197358967" Nov 26 08:59:56 crc kubenswrapper[4940]: I1126 08:59:56.166140 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 08:59:56 crc kubenswrapper[4940]: E1126 08:59:56.167014 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.172800 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9"] Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.176081 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.180435 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.180638 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.185164 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9"] Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.263978 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9pxb\" (UniqueName: \"kubernetes.io/projected/31542d31-1cc8-4a7b-a85d-af79874ed4b9-kube-api-access-t9pxb\") pod \"collect-profiles-29402460-p7fz9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.264638 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31542d31-1cc8-4a7b-a85d-af79874ed4b9-secret-volume\") pod \"collect-profiles-29402460-p7fz9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.265189 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31542d31-1cc8-4a7b-a85d-af79874ed4b9-config-volume\") pod \"collect-profiles-29402460-p7fz9\" (UID: 
\"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.367577 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9pxb\" (UniqueName: \"kubernetes.io/projected/31542d31-1cc8-4a7b-a85d-af79874ed4b9-kube-api-access-t9pxb\") pod \"collect-profiles-29402460-p7fz9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.367770 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31542d31-1cc8-4a7b-a85d-af79874ed4b9-secret-volume\") pod \"collect-profiles-29402460-p7fz9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.367823 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31542d31-1cc8-4a7b-a85d-af79874ed4b9-config-volume\") pod \"collect-profiles-29402460-p7fz9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.369248 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31542d31-1cc8-4a7b-a85d-af79874ed4b9-config-volume\") pod \"collect-profiles-29402460-p7fz9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.380002 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31542d31-1cc8-4a7b-a85d-af79874ed4b9-secret-volume\") pod \"collect-profiles-29402460-p7fz9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.386328 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9pxb\" (UniqueName: \"kubernetes.io/projected/31542d31-1cc8-4a7b-a85d-af79874ed4b9-kube-api-access-t9pxb\") pod \"collect-profiles-29402460-p7fz9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:00 crc kubenswrapper[4940]: I1126 09:00:00.511232 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:01 crc kubenswrapper[4940]: I1126 09:00:01.034650 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9"] Nov 26 09:00:01 crc kubenswrapper[4940]: I1126 09:00:01.741058 4940 generic.go:334] "Generic (PLEG): container finished" podID="31542d31-1cc8-4a7b-a85d-af79874ed4b9" containerID="4c44ce744c763bbac7282a1e346efc730ce2e29178a9f5cb0c19f94032efaf82" exitCode=0 Nov 26 09:00:01 crc kubenswrapper[4940]: I1126 09:00:01.741325 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" event={"ID":"31542d31-1cc8-4a7b-a85d-af79874ed4b9","Type":"ContainerDied","Data":"4c44ce744c763bbac7282a1e346efc730ce2e29178a9f5cb0c19f94032efaf82"} Nov 26 09:00:01 crc kubenswrapper[4940]: I1126 09:00:01.741382 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" event={"ID":"31542d31-1cc8-4a7b-a85d-af79874ed4b9","Type":"ContainerStarted","Data":"a811a5995ae956e252aeea11ca9a985e131c3d5b14ffe9e86407609222077812"} Nov 26 09:00:02 crc kubenswrapper[4940]: I1126 09:00:02.345256 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 26 09:00:02 crc kubenswrapper[4940]: I1126 09:00:02.418684 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 09:00:02 crc kubenswrapper[4940]: I1126 09:00:02.480691 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 09:00:02 crc kubenswrapper[4940]: I1126 09:00:02.655308 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zd6rc"] Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.214907 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.247336 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31542d31-1cc8-4a7b-a85d-af79874ed4b9-config-volume\") pod \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.247438 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9pxb\" (UniqueName: \"kubernetes.io/projected/31542d31-1cc8-4a7b-a85d-af79874ed4b9-kube-api-access-t9pxb\") pod \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.247604 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31542d31-1cc8-4a7b-a85d-af79874ed4b9-secret-volume\") pod \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\" (UID: \"31542d31-1cc8-4a7b-a85d-af79874ed4b9\") " Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.253163 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31542d31-1cc8-4a7b-a85d-af79874ed4b9-config-volume" (OuterVolumeSpecName: "config-volume") pod "31542d31-1cc8-4a7b-a85d-af79874ed4b9" (UID: "31542d31-1cc8-4a7b-a85d-af79874ed4b9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.269196 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31542d31-1cc8-4a7b-a85d-af79874ed4b9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "31542d31-1cc8-4a7b-a85d-af79874ed4b9" (UID: "31542d31-1cc8-4a7b-a85d-af79874ed4b9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.275771 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/31542d31-1cc8-4a7b-a85d-af79874ed4b9-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.275813 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31542d31-1cc8-4a7b-a85d-af79874ed4b9-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.275980 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31542d31-1cc8-4a7b-a85d-af79874ed4b9-kube-api-access-t9pxb" (OuterVolumeSpecName: "kube-api-access-t9pxb") pod "31542d31-1cc8-4a7b-a85d-af79874ed4b9" (UID: "31542d31-1cc8-4a7b-a85d-af79874ed4b9"). InnerVolumeSpecName "kube-api-access-t9pxb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.378574 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9pxb\" (UniqueName: \"kubernetes.io/projected/31542d31-1cc8-4a7b-a85d-af79874ed4b9-kube-api-access-t9pxb\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.770889 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" event={"ID":"31542d31-1cc8-4a7b-a85d-af79874ed4b9","Type":"ContainerDied","Data":"a811a5995ae956e252aeea11ca9a985e131c3d5b14ffe9e86407609222077812"} Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.770972 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a811a5995ae956e252aeea11ca9a985e131c3d5b14ffe9e86407609222077812" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.770926 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9" Nov 26 09:00:03 crc kubenswrapper[4940]: I1126 09:00:03.771572 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zd6rc" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="registry-server" containerID="cri-o://20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b" gracePeriod=2 Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.300926 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9"] Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.305211 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.311843 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-w8vq9"] Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.402974 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-catalog-content\") pod \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.403337 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbfwv\" (UniqueName: \"kubernetes.io/projected/babf853d-ca8b-464a-97cf-70c9a6c4fed4-kube-api-access-hbfwv\") pod \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.403515 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-utilities\") pod \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\" (UID: \"babf853d-ca8b-464a-97cf-70c9a6c4fed4\") " Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.404345 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-utilities" (OuterVolumeSpecName: "utilities") pod "babf853d-ca8b-464a-97cf-70c9a6c4fed4" (UID: "babf853d-ca8b-464a-97cf-70c9a6c4fed4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.409012 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/babf853d-ca8b-464a-97cf-70c9a6c4fed4-kube-api-access-hbfwv" (OuterVolumeSpecName: "kube-api-access-hbfwv") pod "babf853d-ca8b-464a-97cf-70c9a6c4fed4" (UID: "babf853d-ca8b-464a-97cf-70c9a6c4fed4"). InnerVolumeSpecName "kube-api-access-hbfwv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.469322 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "babf853d-ca8b-464a-97cf-70c9a6c4fed4" (UID: "babf853d-ca8b-464a-97cf-70c9a6c4fed4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.506241 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.506503 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbfwv\" (UniqueName: \"kubernetes.io/projected/babf853d-ca8b-464a-97cf-70c9a6c4fed4-kube-api-access-hbfwv\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.506592 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/babf853d-ca8b-464a-97cf-70c9a6c4fed4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.785495 4940 generic.go:334] "Generic (PLEG): container finished" podID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerID="20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b" exitCode=0 Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.785547 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zd6rc" event={"ID":"babf853d-ca8b-464a-97cf-70c9a6c4fed4","Type":"ContainerDied","Data":"20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b"} Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.785577 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zd6rc" event={"ID":"babf853d-ca8b-464a-97cf-70c9a6c4fed4","Type":"ContainerDied","Data":"f9f3f65fafecdae255c7a96cb4e80e12cab72d8e37d257877d31acec5ab778e2"} Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.785599 4940 scope.go:117] "RemoveContainer" containerID="20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.786232 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zd6rc" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.809529 4940 scope.go:117] "RemoveContainer" containerID="6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1" Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.833443 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zd6rc"] Nov 26 09:00:04 crc kubenswrapper[4940]: I1126 09:00:04.854222 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zd6rc"] Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.049118 4940 scope.go:117] "RemoveContainer" containerID="89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561" Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.068391 4940 scope.go:117] "RemoveContainer" containerID="20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b" Nov 26 09:00:05 crc kubenswrapper[4940]: E1126 09:00:05.069220 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b\": container with ID starting with 20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b not found: ID does not exist" containerID="20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b" Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.069343 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b"} err="failed to get container status \"20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b\": rpc error: code = NotFound desc = could not find container \"20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b\": container with ID starting with 20fd7af78d80d22538ca4cc35a126598edaa9d2c17dff713fddd967097bcf45b not found: ID does not exist" Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.069442 4940 scope.go:117] "RemoveContainer" containerID="6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1" Nov 26 09:00:05 crc kubenswrapper[4940]: E1126 09:00:05.069821 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1\": container with ID starting with 6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1 not found: ID does not exist" containerID="6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1" Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.069849 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1"} err="failed to get container status \"6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1\": rpc error: code = NotFound desc = could not find container \"6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1\": container with ID starting with 6b17dc992c37f40173b5ba8f6b8ab352533f1065732c523407d0bd38070ed4b1 not found: ID does not exist" Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.069867 4940 scope.go:117] "RemoveContainer" containerID="89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561" Nov 26 09:00:05 crc kubenswrapper[4940]: E1126 09:00:05.070333 4940 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561\": container with ID starting with 89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561 not found: ID does not exist" containerID="89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561" Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.070366 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561"} err="failed to get container status \"89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561\": rpc error: code = NotFound desc = could not find container \"89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561\": container with ID starting with 89a37c747271645b7d5f2ff7801a6c7065d3d6c3c786ebf4dba7922564451561 not found: ID does not exist" Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.183947 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96dbb1d2-519a-44b5-a49e-fc94031dd3e8" path="/var/lib/kubelet/pods/96dbb1d2-519a-44b5-a49e-fc94031dd3e8/volumes" Nov 26 09:00:05 crc kubenswrapper[4940]: I1126 09:00:05.187634 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" path="/var/lib/kubelet/pods/babf853d-ca8b-464a-97cf-70c9a6c4fed4/volumes" Nov 26 09:00:10 crc kubenswrapper[4940]: I1126 09:00:10.166009 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:00:10 crc kubenswrapper[4940]: E1126 09:00:10.167740 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:00:13 crc kubenswrapper[4940]: I1126 09:00:13.034228 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-m4qtp"] Nov 26 09:00:13 crc kubenswrapper[4940]: I1126 09:00:13.049429 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c73b-account-create-update-9fgjt"] Nov 26 09:00:13 crc kubenswrapper[4940]: I1126 09:00:13.064739 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-m4qtp"] Nov 26 09:00:13 crc kubenswrapper[4940]: I1126 09:00:13.075924 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-c73b-account-create-update-9fgjt"] Nov 26 09:00:13 crc kubenswrapper[4940]: I1126 09:00:13.185364 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c9f8615-a457-4f58-921f-41e784d31923" path="/var/lib/kubelet/pods/3c9f8615-a457-4f58-921f-41e784d31923/volumes" Nov 26 09:00:13 crc kubenswrapper[4940]: I1126 09:00:13.186781 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9813b0a0-b69f-4db5-8746-50637c407ca5" path="/var/lib/kubelet/pods/9813b0a0-b69f-4db5-8746-50637c407ca5/volumes" Nov 26 09:00:13 crc kubenswrapper[4940]: I1126 09:00:13.944439 4940 scope.go:117] "RemoveContainer" containerID="d816dc9797ef473a1ad0ca1c1ae50e879895ae27ec619dc4679d86bcc206b6eb" Nov 26 09:00:13 crc kubenswrapper[4940]: I1126 09:00:13.977269 4940 scope.go:117] 
"RemoveContainer" containerID="b76f3f48e3ee53393e767a51126722cb62c44bb534b50d016619021234eeb42c" Nov 26 09:00:14 crc kubenswrapper[4940]: I1126 09:00:14.033454 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-xmng9"] Nov 26 09:00:14 crc kubenswrapper[4940]: I1126 09:00:14.043842 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-pltkp"] Nov 26 09:00:14 crc kubenswrapper[4940]: I1126 09:00:14.053003 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-pltkp"] Nov 26 09:00:14 crc kubenswrapper[4940]: I1126 09:00:14.055365 4940 scope.go:117] "RemoveContainer" containerID="892e5fda5519e49768e5c5628cc8e72bd1d761010dd5ae9fcc0ad310fab29b89" Nov 26 09:00:14 crc kubenswrapper[4940]: I1126 09:00:14.063102 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-xmng9"] Nov 26 09:00:14 crc kubenswrapper[4940]: I1126 09:00:14.092200 4940 scope.go:117] "RemoveContainer" containerID="7c207c5d54ed00d7d6c6785ae92205d2f268e76c064786716c261605101d8e1e" Nov 26 09:00:15 crc kubenswrapper[4940]: I1126 09:00:15.042563 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-67ea-account-create-update-p6wmt"] Nov 26 09:00:15 crc kubenswrapper[4940]: I1126 09:00:15.052587 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-448f-account-create-update-tdw5k"] Nov 26 09:00:15 crc kubenswrapper[4940]: I1126 09:00:15.061956 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-67ea-account-create-update-p6wmt"] Nov 26 09:00:15 crc kubenswrapper[4940]: I1126 09:00:15.070990 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-448f-account-create-update-tdw5k"] Nov 26 09:00:15 crc kubenswrapper[4940]: I1126 09:00:15.181077 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3" path="/var/lib/kubelet/pods/7a7fc401-b6ea-4e51-92d5-0404d1f5b9a3/volumes" Nov 26 09:00:15 crc kubenswrapper[4940]: I1126 09:00:15.182916 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cdf461f-a5ee-47a9-95d7-dcb4aef280b5" path="/var/lib/kubelet/pods/8cdf461f-a5ee-47a9-95d7-dcb4aef280b5/volumes" Nov 26 09:00:15 crc kubenswrapper[4940]: I1126 09:00:15.184514 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfdb0c64-4bf2-444d-b6a1-32989360a09e" path="/var/lib/kubelet/pods/dfdb0c64-4bf2-444d-b6a1-32989360a09e/volumes" Nov 26 09:00:15 crc kubenswrapper[4940]: I1126 09:00:15.186172 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f271bc7c-8da3-4fdf-b77c-498d89760a85" path="/var/lib/kubelet/pods/f271bc7c-8da3-4fdf-b77c-498d89760a85/volumes" Nov 26 09:00:19 crc kubenswrapper[4940]: I1126 09:00:19.974523 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 09:00:22 crc kubenswrapper[4940]: I1126 09:00:22.165523 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:00:22 crc kubenswrapper[4940]: E1126 09:00:22.165978 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:00:32 crc kubenswrapper[4940]: I1126 09:00:32.048597 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xktpm"] Nov 26 09:00:32 crc kubenswrapper[4940]: I1126 09:00:32.065096 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xktpm"] Nov 26 09:00:33 crc kubenswrapper[4940]: I1126 09:00:33.165734 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:00:33 crc kubenswrapper[4940]: E1126 09:00:33.166285 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:00:33 crc kubenswrapper[4940]: I1126 09:00:33.176099 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8db993ee-766e-4c04-a4a3-8e6d1051101d" path="/var/lib/kubelet/pods/8db993ee-766e-4c04-a4a3-8e6d1051101d/volumes" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.573768 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66664bd5b9-b6nxc"] Nov 26 09:00:44 crc kubenswrapper[4940]: E1126 09:00:44.574804 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="extract-content" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.574820 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="extract-content" Nov 26 09:00:44 crc kubenswrapper[4940]: E1126 09:00:44.574840 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31542d31-1cc8-4a7b-a85d-af79874ed4b9" containerName="collect-profiles" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.574847 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="31542d31-1cc8-4a7b-a85d-af79874ed4b9" containerName="collect-profiles" Nov 26 09:00:44 crc kubenswrapper[4940]: E1126 09:00:44.574884 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="registry-server" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.574894 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="registry-server" Nov 26 09:00:44 crc kubenswrapper[4940]: E1126 09:00:44.574922 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="extract-utilities" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.574931 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="extract-utilities" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.575231 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="31542d31-1cc8-4a7b-a85d-af79874ed4b9" containerName="collect-profiles" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.575254 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="babf853d-ca8b-464a-97cf-70c9a6c4fed4" containerName="registry-server" Nov 26 09:00:44 
crc kubenswrapper[4940]: I1126 09:00:44.576682 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.580568 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.587952 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66664bd5b9-b6nxc"] Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.672618 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-config\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.672898 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-dns-svc\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.673195 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-nb\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.673367 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-sb\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.673493 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-openstack-cell1\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.673634 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr76l\" (UniqueName: \"kubernetes.io/projected/a6c6b0c2-fb81-4562-bf61-b5476403b263-kube-api-access-rr76l\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.775346 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-config\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.775410 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-dns-svc\") pod 
\"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.775499 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-nb\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.775533 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-sb\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.775558 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-openstack-cell1\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.775594 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr76l\" (UniqueName: \"kubernetes.io/projected/a6c6b0c2-fb81-4562-bf61-b5476403b263-kube-api-access-rr76l\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.776311 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-openstack-cell1\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.776345 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-dns-svc\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.776740 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-sb\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.776819 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-config\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.777511 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-nb\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " 
pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.816091 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr76l\" (UniqueName: \"kubernetes.io/projected/a6c6b0c2-fb81-4562-bf61-b5476403b263-kube-api-access-rr76l\") pod \"dnsmasq-dns-66664bd5b9-b6nxc\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:44 crc kubenswrapper[4940]: I1126 09:00:44.899202 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:45 crc kubenswrapper[4940]: I1126 09:00:45.428017 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66664bd5b9-b6nxc"] Nov 26 09:00:46 crc kubenswrapper[4940]: I1126 09:00:46.045980 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-s8s4r"] Nov 26 09:00:46 crc kubenswrapper[4940]: I1126 09:00:46.056297 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-s8s4r"] Nov 26 09:00:46 crc kubenswrapper[4940]: I1126 09:00:46.222235 4940 generic.go:334] "Generic (PLEG): container finished" podID="a6c6b0c2-fb81-4562-bf61-b5476403b263" containerID="a2627de239767a5122e3dd8a38fd5ef92a0c93ea9884a34b54495ccd90b2fb30" exitCode=0 Nov 26 09:00:46 crc kubenswrapper[4940]: I1126 09:00:46.222288 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" event={"ID":"a6c6b0c2-fb81-4562-bf61-b5476403b263","Type":"ContainerDied","Data":"a2627de239767a5122e3dd8a38fd5ef92a0c93ea9884a34b54495ccd90b2fb30"} Nov 26 09:00:46 crc kubenswrapper[4940]: I1126 09:00:46.222319 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" event={"ID":"a6c6b0c2-fb81-4562-bf61-b5476403b263","Type":"ContainerStarted","Data":"88375f65c1a7b681641d4bae139ff05c86978cc7b774d457fdcfed3e8bcb5e8f"} Nov 26 09:00:47 crc kubenswrapper[4940]: I1126 09:00:47.026135 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rj66c"] Nov 26 09:00:47 crc kubenswrapper[4940]: I1126 09:00:47.036312 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rj66c"] Nov 26 09:00:47 crc kubenswrapper[4940]: I1126 09:00:47.179518 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="157792b6-aead-4358-82d4-c7ba3a6661e2" path="/var/lib/kubelet/pods/157792b6-aead-4358-82d4-c7ba3a6661e2/volumes" Nov 26 09:00:47 crc kubenswrapper[4940]: I1126 09:00:47.183076 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="679f64f7-3f0a-4b6f-800a-11aa64c61f29" path="/var/lib/kubelet/pods/679f64f7-3f0a-4b6f-800a-11aa64c61f29/volumes" Nov 26 09:00:47 crc kubenswrapper[4940]: I1126 09:00:47.235134 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" event={"ID":"a6c6b0c2-fb81-4562-bf61-b5476403b263","Type":"ContainerStarted","Data":"84ec8f3ae2ded6495f2a88eef1bb98560889da070ba274ccaf396f0be855a64c"} Nov 26 09:00:47 crc kubenswrapper[4940]: I1126 09:00:47.235531 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:47 crc kubenswrapper[4940]: I1126 09:00:47.276587 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" podStartSLOduration=3.276560679 
podStartE2EDuration="3.276560679s" podCreationTimestamp="2025-11-26 09:00:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:00:47.250237994 +0000 UTC m=+7548.770379613" watchObservedRunningTime="2025-11-26 09:00:47.276560679 +0000 UTC m=+7548.796702298" Nov 26 09:00:48 crc kubenswrapper[4940]: I1126 09:00:48.165417 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:00:48 crc kubenswrapper[4940]: E1126 09:00:48.165944 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:00:54 crc kubenswrapper[4940]: I1126 09:00:54.901210 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:00:54 crc kubenswrapper[4940]: I1126 09:00:54.986257 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bd68d965-jc846"] Nov 26 09:00:54 crc kubenswrapper[4940]: I1126 09:00:54.986569 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84bd68d965-jc846" podUID="75175e73-2a8a-428f-b0e2-b1f037ab52df" containerName="dnsmasq-dns" containerID="cri-o://071d42c22df1b31ff91552e1f8807a36130ada344790ae7e9c1385fe59965fed" gracePeriod=10 Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.193712 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bd8ccb757-785rc"] Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.195546 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.209709 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bd8ccb757-785rc"] Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.303859 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-sb\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.304036 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-config\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.304186 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ml47b\" (UniqueName: \"kubernetes.io/projected/33119d12-c6e8-4c37-8f97-62bf31c346cd-kube-api-access-ml47b\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.304212 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-nb\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.304240 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-dns-svc\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.304344 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-openstack-cell1\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.332848 4940 generic.go:334] "Generic (PLEG): container finished" podID="75175e73-2a8a-428f-b0e2-b1f037ab52df" containerID="071d42c22df1b31ff91552e1f8807a36130ada344790ae7e9c1385fe59965fed" exitCode=0 Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.332889 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd68d965-jc846" event={"ID":"75175e73-2a8a-428f-b0e2-b1f037ab52df","Type":"ContainerDied","Data":"071d42c22df1b31ff91552e1f8807a36130ada344790ae7e9c1385fe59965fed"} Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.406075 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-openstack-cell1\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: 
\"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.406214 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-sb\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.406259 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-config\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.406363 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ml47b\" (UniqueName: \"kubernetes.io/projected/33119d12-c6e8-4c37-8f97-62bf31c346cd-kube-api-access-ml47b\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.406389 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-nb\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.406414 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-dns-svc\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.407515 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-dns-svc\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.407658 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-config\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.409210 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-openstack-cell1\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.409425 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-sb\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: 
I1126 09:00:55.409481 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-nb\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.439144 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ml47b\" (UniqueName: \"kubernetes.io/projected/33119d12-c6e8-4c37-8f97-62bf31c346cd-kube-api-access-ml47b\") pod \"dnsmasq-dns-5bd8ccb757-785rc\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.517190 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.661145 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.819468 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-config\") pod \"75175e73-2a8a-428f-b0e2-b1f037ab52df\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.819525 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-nb\") pod \"75175e73-2a8a-428f-b0e2-b1f037ab52df\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.819548 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-sb\") pod \"75175e73-2a8a-428f-b0e2-b1f037ab52df\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.819689 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-dns-svc\") pod \"75175e73-2a8a-428f-b0e2-b1f037ab52df\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.819781 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftkmk\" (UniqueName: \"kubernetes.io/projected/75175e73-2a8a-428f-b0e2-b1f037ab52df-kube-api-access-ftkmk\") pod \"75175e73-2a8a-428f-b0e2-b1f037ab52df\" (UID: \"75175e73-2a8a-428f-b0e2-b1f037ab52df\") " Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.824860 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75175e73-2a8a-428f-b0e2-b1f037ab52df-kube-api-access-ftkmk" (OuterVolumeSpecName: "kube-api-access-ftkmk") pod "75175e73-2a8a-428f-b0e2-b1f037ab52df" (UID: "75175e73-2a8a-428f-b0e2-b1f037ab52df"). InnerVolumeSpecName "kube-api-access-ftkmk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.875023 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "75175e73-2a8a-428f-b0e2-b1f037ab52df" (UID: "75175e73-2a8a-428f-b0e2-b1f037ab52df"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.875033 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-config" (OuterVolumeSpecName: "config") pod "75175e73-2a8a-428f-b0e2-b1f037ab52df" (UID: "75175e73-2a8a-428f-b0e2-b1f037ab52df"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.876771 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "75175e73-2a8a-428f-b0e2-b1f037ab52df" (UID: "75175e73-2a8a-428f-b0e2-b1f037ab52df"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.877197 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "75175e73-2a8a-428f-b0e2-b1f037ab52df" (UID: "75175e73-2a8a-428f-b0e2-b1f037ab52df"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.921922 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.921965 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftkmk\" (UniqueName: \"kubernetes.io/projected/75175e73-2a8a-428f-b0e2-b1f037ab52df-kube-api-access-ftkmk\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.921978 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-config\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.921990 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.922001 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75175e73-2a8a-428f-b0e2-b1f037ab52df-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:55 crc kubenswrapper[4940]: I1126 09:00:55.979529 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bd8ccb757-785rc"] Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.343007 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd68d965-jc846" 
event={"ID":"75175e73-2a8a-428f-b0e2-b1f037ab52df","Type":"ContainerDied","Data":"e1065f5e38e3d9b05f784149c2513f1908106593249b90038170710230d660cb"} Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.343330 4940 scope.go:117] "RemoveContainer" containerID="071d42c22df1b31ff91552e1f8807a36130ada344790ae7e9c1385fe59965fed" Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.343228 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bd68d965-jc846" Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.346562 4940 generic.go:334] "Generic (PLEG): container finished" podID="33119d12-c6e8-4c37-8f97-62bf31c346cd" containerID="a9c43899f54290db9cfd8829342976fdcdcb711d15527c48be653ba2c4f1b2e3" exitCode=0 Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.346609 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" event={"ID":"33119d12-c6e8-4c37-8f97-62bf31c346cd","Type":"ContainerDied","Data":"a9c43899f54290db9cfd8829342976fdcdcb711d15527c48be653ba2c4f1b2e3"} Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.346666 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" event={"ID":"33119d12-c6e8-4c37-8f97-62bf31c346cd","Type":"ContainerStarted","Data":"13c9ab8ca747877b8d04662bcc967afeacf54cb555c78ed5747921faf07050a1"} Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.539914 4940 scope.go:117] "RemoveContainer" containerID="3c68fc0a48babfaa78386513355559ba4b26efc000bc77d55e8c39e24fdf54d5" Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.563509 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bd68d965-jc846"] Nov 26 09:00:56 crc kubenswrapper[4940]: I1126 09:00:56.575714 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84bd68d965-jc846"] Nov 26 09:00:57 crc kubenswrapper[4940]: I1126 09:00:57.178154 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75175e73-2a8a-428f-b0e2-b1f037ab52df" path="/var/lib/kubelet/pods/75175e73-2a8a-428f-b0e2-b1f037ab52df/volumes" Nov 26 09:00:57 crc kubenswrapper[4940]: I1126 09:00:57.358392 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" event={"ID":"33119d12-c6e8-4c37-8f97-62bf31c346cd","Type":"ContainerStarted","Data":"3c8e662e30f46cd243a7f6b05dbc01a94cab2872c6363ba0f7f6de23182ac424"} Nov 26 09:00:57 crc kubenswrapper[4940]: I1126 09:00:57.358567 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:00:57 crc kubenswrapper[4940]: I1126 09:00:57.378663 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" podStartSLOduration=2.378638575 podStartE2EDuration="2.378638575s" podCreationTimestamp="2025-11-26 09:00:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:00:57.377201899 +0000 UTC m=+7558.897343528" watchObservedRunningTime="2025-11-26 09:00:57.378638575 +0000 UTC m=+7558.898780234" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.136821 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29402461-k9qcd"] Nov 26 09:01:00 crc kubenswrapper[4940]: E1126 09:01:00.137858 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75175e73-2a8a-428f-b0e2-b1f037ab52df" 
containerName="dnsmasq-dns" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.137874 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="75175e73-2a8a-428f-b0e2-b1f037ab52df" containerName="dnsmasq-dns" Nov 26 09:01:00 crc kubenswrapper[4940]: E1126 09:01:00.137889 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75175e73-2a8a-428f-b0e2-b1f037ab52df" containerName="init" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.137896 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="75175e73-2a8a-428f-b0e2-b1f037ab52df" containerName="init" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.138346 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="75175e73-2a8a-428f-b0e2-b1f037ab52df" containerName="dnsmasq-dns" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.139448 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.153674 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402461-k9qcd"] Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.203470 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-config-data\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.203529 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-fernet-keys\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.203825 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-combined-ca-bundle\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.203910 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdctp\" (UniqueName: \"kubernetes.io/projected/df21e3c2-92cf-4d49-b51b-84a895e3e78f-kube-api-access-tdctp\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.305890 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-combined-ca-bundle\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.306302 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdctp\" (UniqueName: \"kubernetes.io/projected/df21e3c2-92cf-4d49-b51b-84a895e3e78f-kube-api-access-tdctp\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " 
pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.306344 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-config-data\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.306366 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-fernet-keys\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.311955 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-combined-ca-bundle\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.312133 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-config-data\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.312347 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-fernet-keys\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.323222 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdctp\" (UniqueName: \"kubernetes.io/projected/df21e3c2-92cf-4d49-b51b-84a895e3e78f-kube-api-access-tdctp\") pod \"keystone-cron-29402461-k9qcd\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.461817 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:00 crc kubenswrapper[4940]: I1126 09:01:00.994715 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402461-k9qcd"] Nov 26 09:01:01 crc kubenswrapper[4940]: I1126 09:01:01.405798 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402461-k9qcd" event={"ID":"df21e3c2-92cf-4d49-b51b-84a895e3e78f","Type":"ContainerStarted","Data":"d557a4db23926f5fb4ccc28fad689ada094cf188fe01f0df709f88cbb07e7cfd"} Nov 26 09:01:01 crc kubenswrapper[4940]: I1126 09:01:01.406019 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402461-k9qcd" event={"ID":"df21e3c2-92cf-4d49-b51b-84a895e3e78f","Type":"ContainerStarted","Data":"0f5b80c18322d64dce3a8eff753946f1fa76e8b96571e7e0a451f633e9364232"} Nov 26 09:01:01 crc kubenswrapper[4940]: I1126 09:01:01.430000 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29402461-k9qcd" podStartSLOduration=1.429979904 podStartE2EDuration="1.429979904s" podCreationTimestamp="2025-11-26 09:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:01:01.42458647 +0000 UTC m=+7562.944728089" watchObservedRunningTime="2025-11-26 09:01:01.429979904 +0000 UTC m=+7562.950121523" Nov 26 09:01:02 crc kubenswrapper[4940]: I1126 09:01:02.030110 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-ztlzq"] Nov 26 09:01:02 crc kubenswrapper[4940]: I1126 09:01:02.039426 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-ztlzq"] Nov 26 09:01:02 crc kubenswrapper[4940]: I1126 09:01:02.165875 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:01:02 crc kubenswrapper[4940]: E1126 09:01:02.166351 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:01:03 crc kubenswrapper[4940]: I1126 09:01:03.182535 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbfeccf1-3acd-4316-b412-e2a4481b54d4" path="/var/lib/kubelet/pods/fbfeccf1-3acd-4316-b412-e2a4481b54d4/volumes" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.509443 4940 generic.go:334] "Generic (PLEG): container finished" podID="df21e3c2-92cf-4d49-b51b-84a895e3e78f" containerID="d557a4db23926f5fb4ccc28fad689ada094cf188fe01f0df709f88cbb07e7cfd" exitCode=0 Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.509516 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402461-k9qcd" event={"ID":"df21e3c2-92cf-4d49-b51b-84a895e3e78f","Type":"ContainerDied","Data":"d557a4db23926f5fb4ccc28fad689ada094cf188fe01f0df709f88cbb07e7cfd"} Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.518228 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.601397 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-66664bd5b9-b6nxc"] Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.601871 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" podUID="a6c6b0c2-fb81-4562-bf61-b5476403b263" containerName="dnsmasq-dns" containerID="cri-o://84ec8f3ae2ded6495f2a88eef1bb98560889da070ba274ccaf396f0be855a64c" gracePeriod=10 Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.840773 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c864b8c85-8zffx"] Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.847637 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.853618 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-networker" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.868489 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c864b8c85-8zffx"] Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.909358 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-dns-svc\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.909409 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-ovsdbserver-nb\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.909434 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-ovsdbserver-sb\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.909584 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-config\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.909678 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-openstack-cell1\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:05 crc kubenswrapper[4940]: I1126 09:01:05.909727 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-networker\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-openstack-networker\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:05 crc kubenswrapper[4940]: 
I1126 09:01:05.909867 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhgbw\" (UniqueName: \"kubernetes.io/projected/e40e18b4-b34c-474e-a2f4-01e35988aa45-kube-api-access-hhgbw\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.011785 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-config\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.011839 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-openstack-cell1\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.011858 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-networker\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-openstack-networker\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.011914 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhgbw\" (UniqueName: \"kubernetes.io/projected/e40e18b4-b34c-474e-a2f4-01e35988aa45-kube-api-access-hhgbw\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.012002 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-dns-svc\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.012029 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-ovsdbserver-nb\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.012072 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-ovsdbserver-sb\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.013016 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-ovsdbserver-nb\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.013080 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-dns-svc\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.013174 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-openstack-cell1\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.013238 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-ovsdbserver-sb\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.013423 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-networker\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-openstack-networker\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.013434 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e40e18b4-b34c-474e-a2f4-01e35988aa45-config\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.030679 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhgbw\" (UniqueName: \"kubernetes.io/projected/e40e18b4-b34c-474e-a2f4-01e35988aa45-kube-api-access-hhgbw\") pod \"dnsmasq-dns-7c864b8c85-8zffx\" (UID: \"e40e18b4-b34c-474e-a2f4-01e35988aa45\") " pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.163151 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.547285 4940 generic.go:334] "Generic (PLEG): container finished" podID="a6c6b0c2-fb81-4562-bf61-b5476403b263" containerID="84ec8f3ae2ded6495f2a88eef1bb98560889da070ba274ccaf396f0be855a64c" exitCode=0 Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.547354 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" event={"ID":"a6c6b0c2-fb81-4562-bf61-b5476403b263","Type":"ContainerDied","Data":"84ec8f3ae2ded6495f2a88eef1bb98560889da070ba274ccaf396f0be855a64c"} Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.794596 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.907401 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c864b8c85-8zffx"] Nov 26 09:01:06 crc kubenswrapper[4940]: W1126 09:01:06.926192 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode40e18b4_b34c_474e_a2f4_01e35988aa45.slice/crio-cd8cc06b3b113b63d8157b69123f60eb4409b3a4ee3328652e2d2e1bd6531f90 WatchSource:0}: Error finding container cd8cc06b3b113b63d8157b69123f60eb4409b3a4ee3328652e2d2e1bd6531f90: Status 404 returned error can't find the container with id cd8cc06b3b113b63d8157b69123f60eb4409b3a4ee3328652e2d2e1bd6531f90 Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.932739 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-sb\") pod \"a6c6b0c2-fb81-4562-bf61-b5476403b263\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.932881 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-dns-svc\") pod \"a6c6b0c2-fb81-4562-bf61-b5476403b263\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.932998 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rr76l\" (UniqueName: \"kubernetes.io/projected/a6c6b0c2-fb81-4562-bf61-b5476403b263-kube-api-access-rr76l\") pod \"a6c6b0c2-fb81-4562-bf61-b5476403b263\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.933092 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-config\") pod \"a6c6b0c2-fb81-4562-bf61-b5476403b263\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.933126 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-openstack-cell1\") pod \"a6c6b0c2-fb81-4562-bf61-b5476403b263\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.933145 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-nb\") pod \"a6c6b0c2-fb81-4562-bf61-b5476403b263\" (UID: \"a6c6b0c2-fb81-4562-bf61-b5476403b263\") " Nov 26 09:01:06 crc kubenswrapper[4940]: I1126 09:01:06.936733 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6c6b0c2-fb81-4562-bf61-b5476403b263-kube-api-access-rr76l" (OuterVolumeSpecName: "kube-api-access-rr76l") pod "a6c6b0c2-fb81-4562-bf61-b5476403b263" (UID: "a6c6b0c2-fb81-4562-bf61-b5476403b263"). InnerVolumeSpecName "kube-api-access-rr76l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.021606 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a6c6b0c2-fb81-4562-bf61-b5476403b263" (UID: "a6c6b0c2-fb81-4562-bf61-b5476403b263"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.034944 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.034972 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rr76l\" (UniqueName: \"kubernetes.io/projected/a6c6b0c2-fb81-4562-bf61-b5476403b263-kube-api-access-rr76l\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.035811 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "a6c6b0c2-fb81-4562-bf61-b5476403b263" (UID: "a6c6b0c2-fb81-4562-bf61-b5476403b263"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.043087 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a6c6b0c2-fb81-4562-bf61-b5476403b263" (UID: "a6c6b0c2-fb81-4562-bf61-b5476403b263"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.050326 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-config" (OuterVolumeSpecName: "config") pod "a6c6b0c2-fb81-4562-bf61-b5476403b263" (UID: "a6c6b0c2-fb81-4562-bf61-b5476403b263"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.060628 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a6c6b0c2-fb81-4562-bf61-b5476403b263" (UID: "a6c6b0c2-fb81-4562-bf61-b5476403b263"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.136597 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.136623 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-config\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.136633 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.136645 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6c6b0c2-fb81-4562-bf61-b5476403b263-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.153875 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.237393 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-config-data\") pod \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.237467 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-combined-ca-bundle\") pod \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.237602 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-fernet-keys\") pod \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.237694 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdctp\" (UniqueName: \"kubernetes.io/projected/df21e3c2-92cf-4d49-b51b-84a895e3e78f-kube-api-access-tdctp\") pod \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\" (UID: \"df21e3c2-92cf-4d49-b51b-84a895e3e78f\") " Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.242403 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df21e3c2-92cf-4d49-b51b-84a895e3e78f-kube-api-access-tdctp" (OuterVolumeSpecName: "kube-api-access-tdctp") pod "df21e3c2-92cf-4d49-b51b-84a895e3e78f" (UID: "df21e3c2-92cf-4d49-b51b-84a895e3e78f"). InnerVolumeSpecName "kube-api-access-tdctp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.244612 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "df21e3c2-92cf-4d49-b51b-84a895e3e78f" (UID: "df21e3c2-92cf-4d49-b51b-84a895e3e78f"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.281510 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "df21e3c2-92cf-4d49-b51b-84a895e3e78f" (UID: "df21e3c2-92cf-4d49-b51b-84a895e3e78f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.310905 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-config-data" (OuterVolumeSpecName: "config-data") pod "df21e3c2-92cf-4d49-b51b-84a895e3e78f" (UID: "df21e3c2-92cf-4d49-b51b-84a895e3e78f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.340175 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.340411 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.340420 4940 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/df21e3c2-92cf-4d49-b51b-84a895e3e78f-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.340429 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdctp\" (UniqueName: \"kubernetes.io/projected/df21e3c2-92cf-4d49-b51b-84a895e3e78f-kube-api-access-tdctp\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.561347 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.561335 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66664bd5b9-b6nxc" event={"ID":"a6c6b0c2-fb81-4562-bf61-b5476403b263","Type":"ContainerDied","Data":"88375f65c1a7b681641d4bae139ff05c86978cc7b774d457fdcfed3e8bcb5e8f"} Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.562225 4940 scope.go:117] "RemoveContainer" containerID="84ec8f3ae2ded6495f2a88eef1bb98560889da070ba274ccaf396f0be855a64c" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.563442 4940 generic.go:334] "Generic (PLEG): container finished" podID="e40e18b4-b34c-474e-a2f4-01e35988aa45" containerID="67f458a1ba83be0dbc3c7d5d26745610895e8f9dc8a90ea4e09c1d2ec4e7167a" exitCode=0 Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.563470 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" event={"ID":"e40e18b4-b34c-474e-a2f4-01e35988aa45","Type":"ContainerDied","Data":"67f458a1ba83be0dbc3c7d5d26745610895e8f9dc8a90ea4e09c1d2ec4e7167a"} Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.563505 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" event={"ID":"e40e18b4-b34c-474e-a2f4-01e35988aa45","Type":"ContainerStarted","Data":"cd8cc06b3b113b63d8157b69123f60eb4409b3a4ee3328652e2d2e1bd6531f90"} Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.566572 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402461-k9qcd" event={"ID":"df21e3c2-92cf-4d49-b51b-84a895e3e78f","Type":"ContainerDied","Data":"0f5b80c18322d64dce3a8eff753946f1fa76e8b96571e7e0a451f633e9364232"} Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.566619 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f5b80c18322d64dce3a8eff753946f1fa76e8b96571e7e0a451f633e9364232" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.566683 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29402461-k9qcd" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.595940 4940 scope.go:117] "RemoveContainer" containerID="a2627de239767a5122e3dd8a38fd5ef92a0c93ea9884a34b54495ccd90b2fb30" Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.601489 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66664bd5b9-b6nxc"] Nov 26 09:01:07 crc kubenswrapper[4940]: I1126 09:01:07.613406 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66664bd5b9-b6nxc"] Nov 26 09:01:08 crc kubenswrapper[4940]: I1126 09:01:08.584078 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" event={"ID":"e40e18b4-b34c-474e-a2f4-01e35988aa45","Type":"ContainerStarted","Data":"1c28c41814a66b25e64acd129b57d4677c4271f7e2ae21794c1e5ae9333ea19c"} Nov 26 09:01:08 crc kubenswrapper[4940]: I1126 09:01:08.584726 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:08 crc kubenswrapper[4940]: I1126 09:01:08.614173 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" podStartSLOduration=3.614154448 podStartE2EDuration="3.614154448s" podCreationTimestamp="2025-11-26 09:01:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:01:08.609056945 +0000 UTC m=+7570.129198574" watchObservedRunningTime="2025-11-26 09:01:08.614154448 +0000 UTC m=+7570.134296067" Nov 26 09:01:09 crc kubenswrapper[4940]: I1126 09:01:09.180860 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6c6b0c2-fb81-4562-bf61-b5476403b263" path="/var/lib/kubelet/pods/a6c6b0c2-fb81-4562-bf61-b5476403b263/volumes" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.166507 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:01:14 crc kubenswrapper[4940]: E1126 09:01:14.167596 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.362638 4940 scope.go:117] "RemoveContainer" containerID="740c51b8315bfb8a669034ad2f9d328c6e593c8ba897326f821dc35384d080ba" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.397673 4940 scope.go:117] "RemoveContainer" containerID="4fbe1f5b41756c2cd41088fdf944b051d0264ba0d6a46c302b279e60891a86a8" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.459365 4940 scope.go:117] "RemoveContainer" containerID="45cc08322947d7ec2e089f5fa0574744d38b51fc39376e740de93a78a1c6a311" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.542918 4940 scope.go:117] "RemoveContainer" containerID="8d8dd3f9a61115d7b0aaca1f6674482246b64b220cbef8fde0c2384c0e854e94" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.584680 4940 scope.go:117] "RemoveContainer" containerID="011384c1edf223a9d805308d673107fb725b8596a0e79bf23284a900aeb7908f" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.635636 4940 scope.go:117] 
"RemoveContainer" containerID="29206dd1aaafe63d76a228cc43ca79decd663b97e9e163598ef896d4ddd014d8" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.719870 4940 scope.go:117] "RemoveContainer" containerID="190fae3d34f158d1f87b93eb4b3be09a481d00e353b120788862ef8799aace96" Nov 26 09:01:14 crc kubenswrapper[4940]: I1126 09:01:14.786158 4940 scope.go:117] "RemoveContainer" containerID="837ce6010df320a7ed19500c3ba21731eed93c1b2a9d9c30b1238a7e61cc5df0" Nov 26 09:01:16 crc kubenswrapper[4940]: I1126 09:01:16.164803 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c864b8c85-8zffx" Nov 26 09:01:16 crc kubenswrapper[4940]: I1126 09:01:16.254288 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bd8ccb757-785rc"] Nov 26 09:01:16 crc kubenswrapper[4940]: I1126 09:01:16.254642 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" podUID="33119d12-c6e8-4c37-8f97-62bf31c346cd" containerName="dnsmasq-dns" containerID="cri-o://3c8e662e30f46cd243a7f6b05dbc01a94cab2872c6363ba0f7f6de23182ac424" gracePeriod=10 Nov 26 09:01:16 crc kubenswrapper[4940]: I1126 09:01:16.686537 4940 generic.go:334] "Generic (PLEG): container finished" podID="33119d12-c6e8-4c37-8f97-62bf31c346cd" containerID="3c8e662e30f46cd243a7f6b05dbc01a94cab2872c6363ba0f7f6de23182ac424" exitCode=0 Nov 26 09:01:16 crc kubenswrapper[4940]: I1126 09:01:16.686622 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" event={"ID":"33119d12-c6e8-4c37-8f97-62bf31c346cd","Type":"ContainerDied","Data":"3c8e662e30f46cd243a7f6b05dbc01a94cab2872c6363ba0f7f6de23182ac424"} Nov 26 09:01:16 crc kubenswrapper[4940]: I1126 09:01:16.879835 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.046077 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-sb\") pod \"33119d12-c6e8-4c37-8f97-62bf31c346cd\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.046152 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-config\") pod \"33119d12-c6e8-4c37-8f97-62bf31c346cd\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.046286 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ml47b\" (UniqueName: \"kubernetes.io/projected/33119d12-c6e8-4c37-8f97-62bf31c346cd-kube-api-access-ml47b\") pod \"33119d12-c6e8-4c37-8f97-62bf31c346cd\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.046321 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-dns-svc\") pod \"33119d12-c6e8-4c37-8f97-62bf31c346cd\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.046367 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-openstack-cell1\") pod \"33119d12-c6e8-4c37-8f97-62bf31c346cd\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.046440 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-nb\") pod \"33119d12-c6e8-4c37-8f97-62bf31c346cd\" (UID: \"33119d12-c6e8-4c37-8f97-62bf31c346cd\") " Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.052404 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33119d12-c6e8-4c37-8f97-62bf31c346cd-kube-api-access-ml47b" (OuterVolumeSpecName: "kube-api-access-ml47b") pod "33119d12-c6e8-4c37-8f97-62bf31c346cd" (UID: "33119d12-c6e8-4c37-8f97-62bf31c346cd"). InnerVolumeSpecName "kube-api-access-ml47b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.102970 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-config" (OuterVolumeSpecName: "config") pod "33119d12-c6e8-4c37-8f97-62bf31c346cd" (UID: "33119d12-c6e8-4c37-8f97-62bf31c346cd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.112732 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "33119d12-c6e8-4c37-8f97-62bf31c346cd" (UID: "33119d12-c6e8-4c37-8f97-62bf31c346cd"). InnerVolumeSpecName "openstack-cell1". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.115368 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "33119d12-c6e8-4c37-8f97-62bf31c346cd" (UID: "33119d12-c6e8-4c37-8f97-62bf31c346cd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.119012 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "33119d12-c6e8-4c37-8f97-62bf31c346cd" (UID: "33119d12-c6e8-4c37-8f97-62bf31c346cd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.127638 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "33119d12-c6e8-4c37-8f97-62bf31c346cd" (UID: "33119d12-c6e8-4c37-8f97-62bf31c346cd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.152845 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.152886 4940 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-config\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.152902 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ml47b\" (UniqueName: \"kubernetes.io/projected/33119d12-c6e8-4c37-8f97-62bf31c346cd-kube-api-access-ml47b\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.152913 4940 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.152921 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.152929 4940 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33119d12-c6e8-4c37-8f97-62bf31c346cd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.697121 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" event={"ID":"33119d12-c6e8-4c37-8f97-62bf31c346cd","Type":"ContainerDied","Data":"13c9ab8ca747877b8d04662bcc967afeacf54cb555c78ed5747921faf07050a1"} Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.697169 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bd8ccb757-785rc" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.697175 4940 scope.go:117] "RemoveContainer" containerID="3c8e662e30f46cd243a7f6b05dbc01a94cab2872c6363ba0f7f6de23182ac424" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.725590 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bd8ccb757-785rc"] Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.729574 4940 scope.go:117] "RemoveContainer" containerID="a9c43899f54290db9cfd8829342976fdcdcb711d15527c48be653ba2c4f1b2e3" Nov 26 09:01:17 crc kubenswrapper[4940]: I1126 09:01:17.736580 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bd8ccb757-785rc"] Nov 26 09:01:19 crc kubenswrapper[4940]: I1126 09:01:19.183969 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33119d12-c6e8-4c37-8f97-62bf31c346cd" path="/var/lib/kubelet/pods/33119d12-c6e8-4c37-8f97-62bf31c346cd/volumes" Nov 26 09:01:27 crc kubenswrapper[4940]: I1126 09:01:27.165845 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:01:27 crc kubenswrapper[4940]: E1126 09:01:27.166970 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.600494 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq"] Nov 26 09:01:31 crc kubenswrapper[4940]: E1126 09:01:31.601560 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33119d12-c6e8-4c37-8f97-62bf31c346cd" containerName="dnsmasq-dns" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.601579 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="33119d12-c6e8-4c37-8f97-62bf31c346cd" containerName="dnsmasq-dns" Nov 26 09:01:31 crc kubenswrapper[4940]: E1126 09:01:31.601604 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33119d12-c6e8-4c37-8f97-62bf31c346cd" containerName="init" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.601611 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="33119d12-c6e8-4c37-8f97-62bf31c346cd" containerName="init" Nov 26 09:01:31 crc kubenswrapper[4940]: E1126 09:01:31.601637 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df21e3c2-92cf-4d49-b51b-84a895e3e78f" containerName="keystone-cron" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.601646 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="df21e3c2-92cf-4d49-b51b-84a895e3e78f" containerName="keystone-cron" Nov 26 09:01:31 crc kubenswrapper[4940]: E1126 09:01:31.601658 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6c6b0c2-fb81-4562-bf61-b5476403b263" containerName="dnsmasq-dns" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.601665 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6c6b0c2-fb81-4562-bf61-b5476403b263" containerName="dnsmasq-dns" Nov 26 09:01:31 crc kubenswrapper[4940]: E1126 09:01:31.601681 4940 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a6c6b0c2-fb81-4562-bf61-b5476403b263" containerName="init" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.601687 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6c6b0c2-fb81-4562-bf61-b5476403b263" containerName="init" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.601920 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="df21e3c2-92cf-4d49-b51b-84a895e3e78f" containerName="keystone-cron" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.601947 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6c6b0c2-fb81-4562-bf61-b5476403b263" containerName="dnsmasq-dns" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.601958 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="33119d12-c6e8-4c37-8f97-62bf31c346cd" containerName="dnsmasq-dns" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.602715 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.605424 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.605631 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.605875 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.606006 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.614609 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x"] Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.618270 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.621689 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.622205 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.627133 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq"] Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.641062 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x"] Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.687755 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.687827 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grq66\" (UniqueName: \"kubernetes.io/projected/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-kube-api-access-grq66\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.688095 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.688118 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.688193 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.688343 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-ssh-key\") pod 
\"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.688400 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-245cx\" (UniqueName: \"kubernetes.io/projected/801e3632-a0b9-46a0-bff8-0ce14f7f5304-kube-api-access-245cx\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.688449 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.688491 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.790738 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.790825 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.790857 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-245cx\" (UniqueName: \"kubernetes.io/projected/801e3632-a0b9-46a0-bff8-0ce14f7f5304-kube-api-access-245cx\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.790893 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.790932 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.790999 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.791067 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grq66\" (UniqueName: \"kubernetes.io/projected/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-kube-api-access-grq66\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.791137 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.791162 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.799910 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.800759 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.800890 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " 
pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.802483 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.808239 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.811505 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.818459 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.818493 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-245cx\" (UniqueName: \"kubernetes.io/projected/801e3632-a0b9-46a0-bff8-0ce14f7f5304-kube-api-access-245cx\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.818609 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grq66\" (UniqueName: \"kubernetes.io/projected/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-kube-api-access-grq66\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.934188 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:31 crc kubenswrapper[4940]: I1126 09:01:31.953975 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:32 crc kubenswrapper[4940]: I1126 09:01:32.586620 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x"] Nov 26 09:01:32 crc kubenswrapper[4940]: I1126 09:01:32.875861 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" event={"ID":"76ea5069-a91e-4ffe-b5c4-41ddd23fd721","Type":"ContainerStarted","Data":"613a830078df6889ef600743af64627863cd62adffef538d1977cd3ccb781ef0"} Nov 26 09:01:33 crc kubenswrapper[4940]: I1126 09:01:33.534641 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq"] Nov 26 09:01:33 crc kubenswrapper[4940]: W1126 09:01:33.537133 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod801e3632_a0b9_46a0_bff8_0ce14f7f5304.slice/crio-ded835d697020ed49eb5592b9a2cecdbbfd1f429cd2e4f1f36798e7b46ff092f WatchSource:0}: Error finding container ded835d697020ed49eb5592b9a2cecdbbfd1f429cd2e4f1f36798e7b46ff092f: Status 404 returned error can't find the container with id ded835d697020ed49eb5592b9a2cecdbbfd1f429cd2e4f1f36798e7b46ff092f Nov 26 09:01:33 crc kubenswrapper[4940]: I1126 09:01:33.887776 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" event={"ID":"801e3632-a0b9-46a0-bff8-0ce14f7f5304","Type":"ContainerStarted","Data":"ded835d697020ed49eb5592b9a2cecdbbfd1f429cd2e4f1f36798e7b46ff092f"} Nov 26 09:01:42 crc kubenswrapper[4940]: I1126 09:01:42.165706 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:01:42 crc kubenswrapper[4940]: E1126 09:01:42.166719 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:01:44 crc kubenswrapper[4940]: I1126 09:01:44.046325 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" event={"ID":"801e3632-a0b9-46a0-bff8-0ce14f7f5304","Type":"ContainerStarted","Data":"d7880a7b2597cb79213e98f3cbaa7f3073ce1098cf642ed3658a3120a0ae65ba"} Nov 26 09:01:44 crc kubenswrapper[4940]: I1126 09:01:44.048241 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-19da-account-create-update-g2knf"] Nov 26 09:01:44 crc kubenswrapper[4940]: I1126 09:01:44.048794 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" event={"ID":"76ea5069-a91e-4ffe-b5c4-41ddd23fd721","Type":"ContainerStarted","Data":"2bad423c534202d99f029b7d203f5ce40b1b9bb184e2a3db85ea273c30c96f0a"} Nov 26 09:01:44 crc kubenswrapper[4940]: I1126 09:01:44.061639 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-19da-account-create-update-g2knf"] Nov 26 09:01:44 crc kubenswrapper[4940]: I1126 09:01:44.076192 4940 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-xql5f"] Nov 26 09:01:44 crc kubenswrapper[4940]: I1126 09:01:44.089265 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-xql5f"] Nov 26 09:01:44 crc kubenswrapper[4940]: I1126 09:01:44.096722 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" podStartSLOduration=3.601089223 podStartE2EDuration="13.096699017s" podCreationTimestamp="2025-11-26 09:01:31 +0000 UTC" firstStartedPulling="2025-11-26 09:01:33.539726741 +0000 UTC m=+7595.059868420" lastFinishedPulling="2025-11-26 09:01:43.035336595 +0000 UTC m=+7604.555478214" observedRunningTime="2025-11-26 09:01:44.073963916 +0000 UTC m=+7605.594105545" watchObservedRunningTime="2025-11-26 09:01:44.096699017 +0000 UTC m=+7605.616840646" Nov 26 09:01:44 crc kubenswrapper[4940]: I1126 09:01:44.104409 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" podStartSLOduration=2.668657204 podStartE2EDuration="13.104391324s" podCreationTimestamp="2025-11-26 09:01:31 +0000 UTC" firstStartedPulling="2025-11-26 09:01:32.59597855 +0000 UTC m=+7594.116120169" lastFinishedPulling="2025-11-26 09:01:43.03171266 +0000 UTC m=+7604.551854289" observedRunningTime="2025-11-26 09:01:44.087249914 +0000 UTC m=+7605.607391543" watchObservedRunningTime="2025-11-26 09:01:44.104391324 +0000 UTC m=+7605.624532953" Nov 26 09:01:45 crc kubenswrapper[4940]: I1126 09:01:45.178253 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43dbf499-08fa-48d2-baab-dfcf7fac7d9e" path="/var/lib/kubelet/pods/43dbf499-08fa-48d2-baab-dfcf7fac7d9e/volumes" Nov 26 09:01:45 crc kubenswrapper[4940]: I1126 09:01:45.179167 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b33a04f0-2a5a-4cb0-82f2-d45d62216aa0" path="/var/lib/kubelet/pods/b33a04f0-2a5a-4cb0-82f2-d45d62216aa0/volumes" Nov 26 09:01:53 crc kubenswrapper[4940]: I1126 09:01:53.166854 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:01:53 crc kubenswrapper[4940]: I1126 09:01:53.167410 4940 generic.go:334] "Generic (PLEG): container finished" podID="76ea5069-a91e-4ffe-b5c4-41ddd23fd721" containerID="2bad423c534202d99f029b7d203f5ce40b1b9bb184e2a3db85ea273c30c96f0a" exitCode=0 Nov 26 09:01:53 crc kubenswrapper[4940]: E1126 09:01:53.168233 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:01:53 crc kubenswrapper[4940]: I1126 09:01:53.192524 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" event={"ID":"76ea5069-a91e-4ffe-b5c4-41ddd23fd721","Type":"ContainerDied","Data":"2bad423c534202d99f029b7d203f5ce40b1b9bb184e2a3db85ea273c30c96f0a"} Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.183478 4940 generic.go:334] "Generic (PLEG): container finished" podID="801e3632-a0b9-46a0-bff8-0ce14f7f5304" 
containerID="d7880a7b2597cb79213e98f3cbaa7f3073ce1098cf642ed3658a3120a0ae65ba" exitCode=0 Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.183577 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" event={"ID":"801e3632-a0b9-46a0-bff8-0ce14f7f5304","Type":"ContainerDied","Data":"d7880a7b2597cb79213e98f3cbaa7f3073ce1098cf642ed3658a3120a0ae65ba"} Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.755932 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.885779 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grq66\" (UniqueName: \"kubernetes.io/projected/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-kube-api-access-grq66\") pod \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.885971 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-ssh-key\") pod \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.887099 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-inventory\") pod \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.887243 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-pre-adoption-validation-combined-ca-bundle\") pod \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\" (UID: \"76ea5069-a91e-4ffe-b5c4-41ddd23fd721\") " Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.899313 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "76ea5069-a91e-4ffe-b5c4-41ddd23fd721" (UID: "76ea5069-a91e-4ffe-b5c4-41ddd23fd721"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.899421 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-kube-api-access-grq66" (OuterVolumeSpecName: "kube-api-access-grq66") pod "76ea5069-a91e-4ffe-b5c4-41ddd23fd721" (UID: "76ea5069-a91e-4ffe-b5c4-41ddd23fd721"). InnerVolumeSpecName "kube-api-access-grq66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.943878 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-inventory" (OuterVolumeSpecName: "inventory") pod "76ea5069-a91e-4ffe-b5c4-41ddd23fd721" (UID: "76ea5069-a91e-4ffe-b5c4-41ddd23fd721"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.956849 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "76ea5069-a91e-4ffe-b5c4-41ddd23fd721" (UID: "76ea5069-a91e-4ffe-b5c4-41ddd23fd721"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.996028 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grq66\" (UniqueName: \"kubernetes.io/projected/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-kube-api-access-grq66\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.996094 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.996109 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:54 crc kubenswrapper[4940]: I1126 09:01:54.996122 4940 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76ea5069-a91e-4ffe-b5c4-41ddd23fd721-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.203242 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.203240 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x" event={"ID":"76ea5069-a91e-4ffe-b5c4-41ddd23fd721","Type":"ContainerDied","Data":"613a830078df6889ef600743af64627863cd62adffef538d1977cd3ccb781ef0"} Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.204486 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="613a830078df6889ef600743af64627863cd62adffef538d1977cd3ccb781ef0" Nov 26 09:01:55 crc kubenswrapper[4940]: E1126 09:01:55.419125 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76ea5069_a91e_4ffe_b5c4_41ddd23fd721.slice/crio-613a830078df6889ef600743af64627863cd62adffef538d1977cd3ccb781ef0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76ea5069_a91e_4ffe_b5c4_41ddd23fd721.slice\": RecentStats: unable to find data in memory cache]" Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.805686 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.918533 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ssh-key\") pod \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.918626 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-245cx\" (UniqueName: \"kubernetes.io/projected/801e3632-a0b9-46a0-bff8-0ce14f7f5304-kube-api-access-245cx\") pod \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.918655 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ceph\") pod \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.918723 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-inventory\") pod \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.918902 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-pre-adoption-validation-combined-ca-bundle\") pod \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\" (UID: \"801e3632-a0b9-46a0-bff8-0ce14f7f5304\") " Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.922747 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/801e3632-a0b9-46a0-bff8-0ce14f7f5304-kube-api-access-245cx" (OuterVolumeSpecName: "kube-api-access-245cx") pod "801e3632-a0b9-46a0-bff8-0ce14f7f5304" (UID: "801e3632-a0b9-46a0-bff8-0ce14f7f5304"). InnerVolumeSpecName "kube-api-access-245cx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.923683 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "801e3632-a0b9-46a0-bff8-0ce14f7f5304" (UID: "801e3632-a0b9-46a0-bff8-0ce14f7f5304"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.931388 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ceph" (OuterVolumeSpecName: "ceph") pod "801e3632-a0b9-46a0-bff8-0ce14f7f5304" (UID: "801e3632-a0b9-46a0-bff8-0ce14f7f5304"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.946726 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "801e3632-a0b9-46a0-bff8-0ce14f7f5304" (UID: "801e3632-a0b9-46a0-bff8-0ce14f7f5304"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:55 crc kubenswrapper[4940]: I1126 09:01:55.953778 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-inventory" (OuterVolumeSpecName: "inventory") pod "801e3632-a0b9-46a0-bff8-0ce14f7f5304" (UID: "801e3632-a0b9-46a0-bff8-0ce14f7f5304"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:56 crc kubenswrapper[4940]: I1126 09:01:56.021873 4940 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:56 crc kubenswrapper[4940]: I1126 09:01:56.021918 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:56 crc kubenswrapper[4940]: I1126 09:01:56.021954 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-245cx\" (UniqueName: \"kubernetes.io/projected/801e3632-a0b9-46a0-bff8-0ce14f7f5304-kube-api-access-245cx\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:56 crc kubenswrapper[4940]: I1126 09:01:56.021969 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:56 crc kubenswrapper[4940]: I1126 09:01:56.021980 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801e3632-a0b9-46a0-bff8-0ce14f7f5304-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:56 crc kubenswrapper[4940]: I1126 09:01:56.220895 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" event={"ID":"801e3632-a0b9-46a0-bff8-0ce14f7f5304","Type":"ContainerDied","Data":"ded835d697020ed49eb5592b9a2cecdbbfd1f429cd2e4f1f36798e7b46ff092f"} Nov 26 09:01:56 crc kubenswrapper[4940]: I1126 09:01:56.220949 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ded835d697020ed49eb5592b9a2cecdbbfd1f429cd2e4f1f36798e7b46ff092f" Nov 26 09:01:56 crc kubenswrapper[4940]: I1126 09:01:56.221852 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.530414 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2"] Nov 26 09:01:59 crc kubenswrapper[4940]: E1126 09:01:59.531426 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="801e3632-a0b9-46a0-bff8-0ce14f7f5304" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.531444 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="801e3632-a0b9-46a0-bff8-0ce14f7f5304" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 26 09:01:59 crc kubenswrapper[4940]: E1126 09:01:59.531460 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76ea5069-a91e-4ffe-b5c4-41ddd23fd721" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-networ" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.531468 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="76ea5069-a91e-4ffe-b5c4-41ddd23fd721" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-networ" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.531822 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="801e3632-a0b9-46a0-bff8-0ce14f7f5304" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.531840 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="76ea5069-a91e-4ffe-b5c4-41ddd23fd721" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-networ" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.532614 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.535069 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.535353 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.537410 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.539017 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.555331 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2"] Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.574640 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv"] Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.576060 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.579299 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.580069 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.597477 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv"] Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.707748 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.707783 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.707821 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcnb5\" (UniqueName: \"kubernetes.io/projected/47b63e78-a365-4aef-85a8-4ecd8fb825a8-kube-api-access-jcnb5\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.707873 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.707894 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b7tp\" (UniqueName: \"kubernetes.io/projected/b6093962-1e67-478a-a37c-8d21eeb86636-kube-api-access-8b7tp\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.707913 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.707942 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.708000 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.708077 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.809513 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.809572 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.809613 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcnb5\" (UniqueName: \"kubernetes.io/projected/47b63e78-a365-4aef-85a8-4ecd8fb825a8-kube-api-access-jcnb5\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.809651 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.809682 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b7tp\" (UniqueName: \"kubernetes.io/projected/b6093962-1e67-478a-a37c-8d21eeb86636-kube-api-access-8b7tp\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc 
kubenswrapper[4940]: I1126 09:01:59.809712 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.809754 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.809827 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.809853 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.815658 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.816850 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.816921 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.817256 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc 
kubenswrapper[4940]: I1126 09:01:59.818420 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.818493 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.823358 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.826897 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b7tp\" (UniqueName: \"kubernetes.io/projected/b6093962-1e67-478a-a37c-8d21eeb86636-kube-api-access-8b7tp\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.827493 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcnb5\" (UniqueName: \"kubernetes.io/projected/47b63e78-a365-4aef-85a8-4ecd8fb825a8-kube-api-access-jcnb5\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.848702 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:01:59 crc kubenswrapper[4940]: I1126 09:01:59.905513 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" Nov 26 09:02:00 crc kubenswrapper[4940]: I1126 09:02:00.545032 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2"] Nov 26 09:02:01 crc kubenswrapper[4940]: I1126 09:02:01.136718 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv"] Nov 26 09:02:01 crc kubenswrapper[4940]: W1126 09:02:01.140312 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6093962_1e67_478a_a37c_8d21eeb86636.slice/crio-9719e0a7a9fd1d4c27b02b4fec4f7724bd21e4f7349e8538c954fe44a90af849 WatchSource:0}: Error finding container 9719e0a7a9fd1d4c27b02b4fec4f7724bd21e4f7349e8538c954fe44a90af849: Status 404 returned error can't find the container with id 9719e0a7a9fd1d4c27b02b4fec4f7724bd21e4f7349e8538c954fe44a90af849 Nov 26 09:02:01 crc kubenswrapper[4940]: I1126 09:02:01.317331 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" event={"ID":"47b63e78-a365-4aef-85a8-4ecd8fb825a8","Type":"ContainerStarted","Data":"14396640000c4c3571f3c65124c9bad49c58e11e2abc5460a950f5b05152f30b"} Nov 26 09:02:01 crc kubenswrapper[4940]: I1126 09:02:01.318811 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" event={"ID":"b6093962-1e67-478a-a37c-8d21eeb86636","Type":"ContainerStarted","Data":"9719e0a7a9fd1d4c27b02b4fec4f7724bd21e4f7349e8538c954fe44a90af849"} Nov 26 09:02:02 crc kubenswrapper[4940]: I1126 09:02:02.329320 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" event={"ID":"47b63e78-a365-4aef-85a8-4ecd8fb825a8","Type":"ContainerStarted","Data":"3978f35fe15e83e7b3d6e097f7bc79d566807d46c0badd32563f52127e12f6a4"} Nov 26 09:02:02 crc kubenswrapper[4940]: I1126 09:02:02.349519 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" podStartSLOduration=2.702550612 podStartE2EDuration="3.349498364s" podCreationTimestamp="2025-11-26 09:01:59 +0000 UTC" firstStartedPulling="2025-11-26 09:02:00.532646251 +0000 UTC m=+7622.052787870" lastFinishedPulling="2025-11-26 09:02:01.179594003 +0000 UTC m=+7622.699735622" observedRunningTime="2025-11-26 09:02:02.345251498 +0000 UTC m=+7623.865393117" watchObservedRunningTime="2025-11-26 09:02:02.349498364 +0000 UTC m=+7623.869639983" Nov 26 09:02:04 crc kubenswrapper[4940]: I1126 09:02:04.166270 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:02:04 crc kubenswrapper[4940]: E1126 09:02:04.167206 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:02:04 crc kubenswrapper[4940]: I1126 09:02:04.355347 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" event={"ID":"b6093962-1e67-478a-a37c-8d21eeb86636","Type":"ContainerStarted","Data":"90efebce1172edfe823a2ccff7e71dee32be3ae8d851674ea7a3483b3968ea5f"} Nov 26 09:02:04 crc kubenswrapper[4940]: I1126 09:02:04.402224 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" podStartSLOduration=3.202955714 podStartE2EDuration="5.402191626s" podCreationTimestamp="2025-11-26 09:01:59 +0000 UTC" firstStartedPulling="2025-11-26 09:02:01.143914847 +0000 UTC m=+7622.664056466" lastFinishedPulling="2025-11-26 09:02:03.343150749 +0000 UTC m=+7624.863292378" observedRunningTime="2025-11-26 09:02:04.396428651 +0000 UTC m=+7625.916570280" watchObservedRunningTime="2025-11-26 09:02:04.402191626 +0000 UTC m=+7625.922333285" Nov 26 09:02:15 crc kubenswrapper[4940]: I1126 09:02:15.162192 4940 scope.go:117] "RemoveContainer" containerID="0c7c5f5ad9ba4e1c3e7713d00d6247eb6be9d9a846684b84076854608f4f5e34" Nov 26 09:02:15 crc kubenswrapper[4940]: I1126 09:02:15.167506 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:02:15 crc kubenswrapper[4940]: E1126 09:02:15.167936 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:02:15 crc kubenswrapper[4940]: I1126 09:02:15.197469 4940 scope.go:117] "RemoveContainer" containerID="e81e18353732eae8341036de7873c52efffce6b957f67be90a2fb3559a29d1b5" Nov 26 09:02:25 crc kubenswrapper[4940]: I1126 09:02:25.057270 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-m9v5m"] Nov 26 09:02:25 crc kubenswrapper[4940]: I1126 09:02:25.068223 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-m9v5m"] Nov 26 09:02:25 crc kubenswrapper[4940]: I1126 09:02:25.178954 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c9f01f4-966a-4757-9bc1-a7097089f833" path="/var/lib/kubelet/pods/8c9f01f4-966a-4757-9bc1-a7097089f833/volumes" Nov 26 09:02:28 crc kubenswrapper[4940]: I1126 09:02:28.166201 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:02:28 crc kubenswrapper[4940]: E1126 09:02:28.166748 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:02:39 crc kubenswrapper[4940]: I1126 09:02:39.172830 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:02:39 crc kubenswrapper[4940]: E1126 09:02:39.173804 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:02:55 crc kubenswrapper[4940]: I1126 09:02:55.165705 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f" Nov 26 09:02:55 crc kubenswrapper[4940]: I1126 09:02:55.931734 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"16092f890c735374f1a9b2f29a12626df1870cc7956ac1999da2f0e61b27af07"} Nov 26 09:03:15 crc kubenswrapper[4940]: I1126 09:03:15.363164 4940 scope.go:117] "RemoveContainer" containerID="906fd0da694e69586e5e97614105f62417217503434eb4be6ef6c6727c84261a" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.710089 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rxm5n"] Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.714891 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.728152 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxm5n"] Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.811104 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-utilities\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.811245 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-catalog-content\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.811308 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kgl4\" (UniqueName: \"kubernetes.io/projected/300010fe-a330-4740-ba3e-a1b751ca5fff-kube-api-access-6kgl4\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.913353 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-catalog-content\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.913779 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgl4\" (UniqueName: \"kubernetes.io/projected/300010fe-a330-4740-ba3e-a1b751ca5fff-kube-api-access-6kgl4\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 
09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.913882 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-utilities\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.914014 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-catalog-content\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.914422 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-utilities\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:45 crc kubenswrapper[4940]: I1126 09:03:45.935737 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kgl4\" (UniqueName: \"kubernetes.io/projected/300010fe-a330-4740-ba3e-a1b751ca5fff-kube-api-access-6kgl4\") pod \"redhat-marketplace-rxm5n\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") " pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:46 crc kubenswrapper[4940]: I1126 09:03:46.042592 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rxm5n" Nov 26 09:03:46 crc kubenswrapper[4940]: I1126 09:03:46.516492 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxm5n"] Nov 26 09:03:47 crc kubenswrapper[4940]: I1126 09:03:47.510664 4940 generic.go:334] "Generic (PLEG): container finished" podID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerID="e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57" exitCode=0 Nov 26 09:03:47 crc kubenswrapper[4940]: I1126 09:03:47.510743 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxm5n" event={"ID":"300010fe-a330-4740-ba3e-a1b751ca5fff","Type":"ContainerDied","Data":"e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57"} Nov 26 09:03:47 crc kubenswrapper[4940]: I1126 09:03:47.510964 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxm5n" event={"ID":"300010fe-a330-4740-ba3e-a1b751ca5fff","Type":"ContainerStarted","Data":"8c7bcbf8103f1028817240a59c69dbd1b619ee8488a7df95be833af3d60f5910"} Nov 26 09:03:47 crc kubenswrapper[4940]: I1126 09:03:47.514106 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:03:48 crc kubenswrapper[4940]: I1126 09:03:48.522421 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxm5n" event={"ID":"300010fe-a330-4740-ba3e-a1b751ca5fff","Type":"ContainerStarted","Data":"5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d"} Nov 26 09:03:49 crc kubenswrapper[4940]: I1126 09:03:49.536019 4940 generic.go:334] "Generic (PLEG): container finished" podID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerID="5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d" exitCode=0 Nov 26 
09:03:49 crc kubenswrapper[4940]: I1126 09:03:49.536144 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxm5n" event={"ID":"300010fe-a330-4740-ba3e-a1b751ca5fff","Type":"ContainerDied","Data":"5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d"}
Nov 26 09:03:50 crc kubenswrapper[4940]: I1126 09:03:50.550937 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxm5n" event={"ID":"300010fe-a330-4740-ba3e-a1b751ca5fff","Type":"ContainerStarted","Data":"c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d"}
Nov 26 09:03:50 crc kubenswrapper[4940]: I1126 09:03:50.575608 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rxm5n" podStartSLOduration=3.103928661 podStartE2EDuration="5.575588109s" podCreationTimestamp="2025-11-26 09:03:45 +0000 UTC" firstStartedPulling="2025-11-26 09:03:47.513649496 +0000 UTC m=+7729.033791155" lastFinishedPulling="2025-11-26 09:03:49.985308984 +0000 UTC m=+7731.505450603" observedRunningTime="2025-11-26 09:03:50.57214058 +0000 UTC m=+7732.092282199" watchObservedRunningTime="2025-11-26 09:03:50.575588109 +0000 UTC m=+7732.095729728"
Nov 26 09:03:56 crc kubenswrapper[4940]: I1126 09:03:56.043498 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rxm5n"
Nov 26 09:03:56 crc kubenswrapper[4940]: I1126 09:03:56.044090 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rxm5n"
Nov 26 09:03:56 crc kubenswrapper[4940]: I1126 09:03:56.105447 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rxm5n"
Nov 26 09:03:56 crc kubenswrapper[4940]: I1126 09:03:56.661170 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rxm5n"
Nov 26 09:03:56 crc kubenswrapper[4940]: I1126 09:03:56.719806 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxm5n"]
Nov 26 09:03:58 crc kubenswrapper[4940]: I1126 09:03:58.630631 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rxm5n" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerName="registry-server" containerID="cri-o://c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d" gracePeriod=2
Nov 26 09:03:58 crc kubenswrapper[4940]: E1126 09:03:58.897822 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod300010fe_a330_4740_ba3e_a1b751ca5fff.slice/crio-c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.153011 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rxm5n"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.285671 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-utilities\") pod \"300010fe-a330-4740-ba3e-a1b751ca5fff\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") "
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.285811 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kgl4\" (UniqueName: \"kubernetes.io/projected/300010fe-a330-4740-ba3e-a1b751ca5fff-kube-api-access-6kgl4\") pod \"300010fe-a330-4740-ba3e-a1b751ca5fff\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") "
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.285830 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-catalog-content\") pod \"300010fe-a330-4740-ba3e-a1b751ca5fff\" (UID: \"300010fe-a330-4740-ba3e-a1b751ca5fff\") "
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.286983 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-utilities" (OuterVolumeSpecName: "utilities") pod "300010fe-a330-4740-ba3e-a1b751ca5fff" (UID: "300010fe-a330-4740-ba3e-a1b751ca5fff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.294399 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/300010fe-a330-4740-ba3e-a1b751ca5fff-kube-api-access-6kgl4" (OuterVolumeSpecName: "kube-api-access-6kgl4") pod "300010fe-a330-4740-ba3e-a1b751ca5fff" (UID: "300010fe-a330-4740-ba3e-a1b751ca5fff"). InnerVolumeSpecName "kube-api-access-6kgl4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.311365 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "300010fe-a330-4740-ba3e-a1b751ca5fff" (UID: "300010fe-a330-4740-ba3e-a1b751ca5fff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.389971 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.390339 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kgl4\" (UniqueName: \"kubernetes.io/projected/300010fe-a330-4740-ba3e-a1b751ca5fff-kube-api-access-6kgl4\") on node \"crc\" DevicePath \"\""
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.390471 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/300010fe-a330-4740-ba3e-a1b751ca5fff-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.643417 4940 generic.go:334] "Generic (PLEG): container finished" podID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerID="c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d" exitCode=0
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.643453 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxm5n" event={"ID":"300010fe-a330-4740-ba3e-a1b751ca5fff","Type":"ContainerDied","Data":"c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d"}
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.643813 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxm5n" event={"ID":"300010fe-a330-4740-ba3e-a1b751ca5fff","Type":"ContainerDied","Data":"8c7bcbf8103f1028817240a59c69dbd1b619ee8488a7df95be833af3d60f5910"}
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.643846 4940 scope.go:117] "RemoveContainer" containerID="c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.643529 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rxm5n"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.675330 4940 scope.go:117] "RemoveContainer" containerID="5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.693601 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxm5n"]
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.702371 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxm5n"]
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.715776 4940 scope.go:117] "RemoveContainer" containerID="e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.749995 4940 scope.go:117] "RemoveContainer" containerID="c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d"
Nov 26 09:03:59 crc kubenswrapper[4940]: E1126 09:03:59.750603 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d\": container with ID starting with c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d not found: ID does not exist" containerID="c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.750650 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d"} err="failed to get container status \"c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d\": rpc error: code = NotFound desc = could not find container \"c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d\": container with ID starting with c84fb3047c8ed3233a49cd847edcb1e7526e88bfabd811ea1a98a812bccd149d not found: ID does not exist"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.750677 4940 scope.go:117] "RemoveContainer" containerID="5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d"
Nov 26 09:03:59 crc kubenswrapper[4940]: E1126 09:03:59.751204 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d\": container with ID starting with 5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d not found: ID does not exist" containerID="5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.751254 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d"} err="failed to get container status \"5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d\": rpc error: code = NotFound desc = could not find container \"5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d\": container with ID starting with 5c29417d0f2c66b9ae4e3ad81484e53dd7d8db9a70ef934a079c2bc53d38af6d not found: ID does not exist"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.751290 4940 scope.go:117] "RemoveContainer" containerID="e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57"
Nov 26 09:03:59 crc kubenswrapper[4940]: E1126 09:03:59.751640 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57\": container with ID starting with e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57 not found: ID does not exist" containerID="e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57"
Nov 26 09:03:59 crc kubenswrapper[4940]: I1126 09:03:59.751677 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57"} err="failed to get container status \"e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57\": rpc error: code = NotFound desc = could not find container \"e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57\": container with ID starting with e3be0baf1616b00d9f226af289983ce36d5d46e0f591f3fdd81d91abd8884a57 not found: ID does not exist"
Nov 26 09:04:01 crc kubenswrapper[4940]: I1126 09:04:01.176203 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" path="/var/lib/kubelet/pods/300010fe-a330-4740-ba3e-a1b751ca5fff/volumes"
Nov 26 09:05:21 crc kubenswrapper[4940]: I1126 09:05:21.728256 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:05:21 crc kubenswrapper[4940]: I1126 09:05:21.728835 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:05:51 crc kubenswrapper[4940]: I1126 09:05:51.727802 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:05:51 crc kubenswrapper[4940]: I1126 09:05:51.728589 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:06:19 crc kubenswrapper[4940]: I1126 09:06:19.048993 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-f7f7-account-create-update-7xqmf"]
Nov 26 09:06:19 crc kubenswrapper[4940]: I1126 09:06:19.059927 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-8jw7v"]
Nov 26 09:06:19 crc kubenswrapper[4940]: I1126 09:06:19.068870 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-8jw7v"]
Nov 26 09:06:19 crc kubenswrapper[4940]: I1126 09:06:19.077549 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-f7f7-account-create-update-7xqmf"]
Nov 26 09:06:19 crc kubenswrapper[4940]: I1126 09:06:19.178519 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfb71163-cff0-41a1-8400-cf97b444d624" path="/var/lib/kubelet/pods/bfb71163-cff0-41a1-8400-cf97b444d624/volumes"
Nov 26 09:06:19 crc kubenswrapper[4940]: I1126 09:06:19.179080 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb422d31-3977-4c63-87f8-a90c62bc00f1" path="/var/lib/kubelet/pods/eb422d31-3977-4c63-87f8-a90c62bc00f1/volumes"
Nov 26 09:06:21 crc kubenswrapper[4940]: I1126 09:06:21.728320 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:06:21 crc kubenswrapper[4940]: I1126 09:06:21.728906 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:06:21 crc kubenswrapper[4940]: I1126 09:06:21.728952 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 09:06:21 crc kubenswrapper[4940]: I1126 09:06:21.729656 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"16092f890c735374f1a9b2f29a12626df1870cc7956ac1999da2f0e61b27af07"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 09:06:21 crc kubenswrapper[4940]: I1126 09:06:21.729727 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://16092f890c735374f1a9b2f29a12626df1870cc7956ac1999da2f0e61b27af07" gracePeriod=600
Nov 26 09:06:22 crc kubenswrapper[4940]: I1126 09:06:22.387406 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="16092f890c735374f1a9b2f29a12626df1870cc7956ac1999da2f0e61b27af07" exitCode=0
Nov 26 09:06:22 crc kubenswrapper[4940]: I1126 09:06:22.387450 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"16092f890c735374f1a9b2f29a12626df1870cc7956ac1999da2f0e61b27af07"}
Nov 26 09:06:22 crc kubenswrapper[4940]: I1126 09:06:22.387695 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"}
Nov 26 09:06:22 crc kubenswrapper[4940]: I1126 09:06:22.387714 4940 scope.go:117] "RemoveContainer" containerID="06224ea8c971242b0c7e5421e9a024f614ed3c9963e5ac7d822262f93c49c43f"
Nov 26 09:06:24 crc kubenswrapper[4940]: I1126 09:06:24.419659 4940 generic.go:334] "Generic (PLEG): container finished" podID="b6093962-1e67-478a-a37c-8d21eeb86636" containerID="90efebce1172edfe823a2ccff7e71dee32be3ae8d851674ea7a3483b3968ea5f" exitCode=0
Nov 26 09:06:24 crc kubenswrapper[4940]: I1126 09:06:24.419890 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" event={"ID":"b6093962-1e67-478a-a37c-8d21eeb86636","Type":"ContainerDied","Data":"90efebce1172edfe823a2ccff7e71dee32be3ae8d851674ea7a3483b3968ea5f"}
Nov 26 09:06:25 crc kubenswrapper[4940]: I1126 09:06:25.952125 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv"
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.148102 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-tripleo-cleanup-combined-ca-bundle\") pod \"b6093962-1e67-478a-a37c-8d21eeb86636\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") "
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.148237 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-inventory\") pod \"b6093962-1e67-478a-a37c-8d21eeb86636\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") "
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.148436 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b7tp\" (UniqueName: \"kubernetes.io/projected/b6093962-1e67-478a-a37c-8d21eeb86636-kube-api-access-8b7tp\") pod \"b6093962-1e67-478a-a37c-8d21eeb86636\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") "
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.148484 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-ssh-key\") pod \"b6093962-1e67-478a-a37c-8d21eeb86636\" (UID: \"b6093962-1e67-478a-a37c-8d21eeb86636\") "
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.155157 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "b6093962-1e67-478a-a37c-8d21eeb86636" (UID: "b6093962-1e67-478a-a37c-8d21eeb86636"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.155392 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6093962-1e67-478a-a37c-8d21eeb86636-kube-api-access-8b7tp" (OuterVolumeSpecName: "kube-api-access-8b7tp") pod "b6093962-1e67-478a-a37c-8d21eeb86636" (UID: "b6093962-1e67-478a-a37c-8d21eeb86636"). InnerVolumeSpecName "kube-api-access-8b7tp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.188192 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b6093962-1e67-478a-a37c-8d21eeb86636" (UID: "b6093962-1e67-478a-a37c-8d21eeb86636"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.206719 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-inventory" (OuterVolumeSpecName: "inventory") pod "b6093962-1e67-478a-a37c-8d21eeb86636" (UID: "b6093962-1e67-478a-a37c-8d21eeb86636"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.254055 4940 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.254105 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.254122 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b7tp\" (UniqueName: \"kubernetes.io/projected/b6093962-1e67-478a-a37c-8d21eeb86636-kube-api-access-8b7tp\") on node \"crc\" DevicePath \"\""
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.254136 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6093962-1e67-478a-a37c-8d21eeb86636-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.449591 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv" event={"ID":"b6093962-1e67-478a-a37c-8d21eeb86636","Type":"ContainerDied","Data":"9719e0a7a9fd1d4c27b02b4fec4f7724bd21e4f7349e8538c954fe44a90af849"}
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.449635 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9719e0a7a9fd1d4c27b02b4fec4f7724bd21e4f7349e8538c954fe44a90af849"
Nov 26 09:06:26 crc kubenswrapper[4940]: I1126 09:06:26.449708 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv"
Nov 26 09:06:34 crc kubenswrapper[4940]: I1126 09:06:34.045249 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-kzlhr"]
Nov 26 09:06:34 crc kubenswrapper[4940]: I1126 09:06:34.055324 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-kzlhr"]
Nov 26 09:06:35 crc kubenswrapper[4940]: I1126 09:06:35.187725 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62b1b100-b34d-47bd-9f08-f6061f6d61c6" path="/var/lib/kubelet/pods/62b1b100-b34d-47bd-9f08-f6061f6d61c6/volumes"
Nov 26 09:07:15 crc kubenswrapper[4940]: I1126 09:07:15.533093 4940 scope.go:117] "RemoveContainer" containerID="63f74c87f4b6d16250ec889fbcd83573a9c0c8500b0b298cf0f88595e95651ab"
Nov 26 09:07:15 crc kubenswrapper[4940]: I1126 09:07:15.568125 4940 scope.go:117] "RemoveContainer" containerID="607a2df3f21c060272ac0cd8b5e1d06102e684b7d6e337f38c9a24df1730b09a"
Nov 26 09:07:15 crc kubenswrapper[4940]: I1126 09:07:15.620176 4940 scope.go:117] "RemoveContainer" containerID="6a7cf35a9878ac612d6692229c60f0dbfa4f3a49269bc26ecea27ed1c854a951"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.119130 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-td42b"]
Nov 26 09:08:29 crc kubenswrapper[4940]: E1126 09:08:29.120431 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6093962-1e67-478a-a37c-8d21eeb86636" containerName="tripleo-cleanup-tripleo-cleanup-openstack-networker"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.120456 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6093962-1e67-478a-a37c-8d21eeb86636" containerName="tripleo-cleanup-tripleo-cleanup-openstack-networker"
Nov 26 09:08:29 crc kubenswrapper[4940]: E1126 09:08:29.120499 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerName="registry-server"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.120515 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerName="registry-server"
Nov 26 09:08:29 crc kubenswrapper[4940]: E1126 09:08:29.120543 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerName="extract-content"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.120555 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerName="extract-content"
Nov 26 09:08:29 crc kubenswrapper[4940]: E1126 09:08:29.120631 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerName="extract-utilities"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.120644 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerName="extract-utilities"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.120980 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="300010fe-a330-4740-ba3e-a1b751ca5fff" containerName="registry-server"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.121017 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6093962-1e67-478a-a37c-8d21eeb86636" containerName="tripleo-cleanup-tripleo-cleanup-openstack-networker"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.123994 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.130871 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-td42b"]
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.264624 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-utilities\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.264980 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-catalog-content\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.265340 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfkf8\" (UniqueName: \"kubernetes.io/projected/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-kube-api-access-kfkf8\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.367251 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-utilities\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.367331 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-catalog-content\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.367413 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfkf8\" (UniqueName: \"kubernetes.io/projected/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-kube-api-access-kfkf8\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.367831 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-utilities\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.367863 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-catalog-content\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.389099 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfkf8\" (UniqueName: \"kubernetes.io/projected/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-kube-api-access-kfkf8\") pod \"certified-operators-td42b\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") " pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:29 crc kubenswrapper[4940]: I1126 09:08:29.451228 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:30 crc kubenswrapper[4940]: I1126 09:08:30.028134 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-td42b"]
Nov 26 09:08:30 crc kubenswrapper[4940]: W1126 09:08:30.033304 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda92ec5c4_9655_49fa_84c8_c26e52e9fa0d.slice/crio-293056f8381a44983a765bf4f46e448a83496ab810bf87778f21d35459e1dab4 WatchSource:0}: Error finding container 293056f8381a44983a765bf4f46e448a83496ab810bf87778f21d35459e1dab4: Status 404 returned error can't find the container with id 293056f8381a44983a765bf4f46e448a83496ab810bf87778f21d35459e1dab4
Nov 26 09:08:30 crc kubenswrapper[4940]: I1126 09:08:30.882698 4940 generic.go:334] "Generic (PLEG): container finished" podID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerID="21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634" exitCode=0
Nov 26 09:08:30 crc kubenswrapper[4940]: I1126 09:08:30.882789 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-td42b" event={"ID":"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d","Type":"ContainerDied","Data":"21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634"}
Nov 26 09:08:30 crc kubenswrapper[4940]: I1126 09:08:30.883013 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-td42b" event={"ID":"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d","Type":"ContainerStarted","Data":"293056f8381a44983a765bf4f46e448a83496ab810bf87778f21d35459e1dab4"}
Nov 26 09:08:31 crc kubenswrapper[4940]: I1126 09:08:31.898679 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-td42b" event={"ID":"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d","Type":"ContainerStarted","Data":"ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88"}
Nov 26 09:08:33 crc kubenswrapper[4940]: I1126 09:08:33.929860 4940 generic.go:334] "Generic (PLEG): container finished" podID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerID="ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88" exitCode=0
Nov 26 09:08:33 crc kubenswrapper[4940]: I1126 09:08:33.929951 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-td42b" event={"ID":"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d","Type":"ContainerDied","Data":"ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88"}
Nov 26 09:08:34 crc kubenswrapper[4940]: I1126 09:08:34.943722 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-td42b" event={"ID":"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d","Type":"ContainerStarted","Data":"4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177"}
Nov 26 09:08:34 crc kubenswrapper[4940]: I1126 09:08:34.979097 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-td42b" podStartSLOduration=2.500977863 podStartE2EDuration="5.979075837s" podCreationTimestamp="2025-11-26 09:08:29 +0000 UTC" firstStartedPulling="2025-11-26 09:08:30.885599709 +0000 UTC m=+8012.405741328" lastFinishedPulling="2025-11-26 09:08:34.363697683 +0000 UTC m=+8015.883839302" observedRunningTime="2025-11-26 09:08:34.962626174 +0000 UTC m=+8016.482767793" watchObservedRunningTime="2025-11-26 09:08:34.979075837 +0000 UTC m=+8016.499217466"
Nov 26 09:08:36 crc kubenswrapper[4940]: I1126 09:08:36.048905 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-j7fd4"]
Nov 26 09:08:36 crc kubenswrapper[4940]: I1126 09:08:36.060146 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-68c8-account-create-update-r5xqd"]
Nov 26 09:08:36 crc kubenswrapper[4940]: I1126 09:08:36.075173 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-j7fd4"]
Nov 26 09:08:36 crc kubenswrapper[4940]: I1126 09:08:36.079862 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-68c8-account-create-update-r5xqd"]
Nov 26 09:08:37 crc kubenswrapper[4940]: I1126 09:08:37.183918 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04e6903d-de70-4c7b-b9aa-d245708db3fc" path="/var/lib/kubelet/pods/04e6903d-de70-4c7b-b9aa-d245708db3fc/volumes"
Nov 26 09:08:37 crc kubenswrapper[4940]: I1126 09:08:37.185431 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5160b9cf-50dd-4e95-a8b4-b0847dbb687e" path="/var/lib/kubelet/pods/5160b9cf-50dd-4e95-a8b4-b0847dbb687e/volumes"
Nov 26 09:08:39 crc kubenswrapper[4940]: I1126 09:08:39.451830 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:39 crc kubenswrapper[4940]: I1126 09:08:39.452284 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:39 crc kubenswrapper[4940]: I1126 09:08:39.543800 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:40 crc kubenswrapper[4940]: I1126 09:08:40.088475 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:40 crc kubenswrapper[4940]: I1126 09:08:40.145684 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-td42b"]
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.022996 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-td42b" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerName="registry-server" containerID="cri-o://4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177" gracePeriod=2
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.625861 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.767840 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfkf8\" (UniqueName: \"kubernetes.io/projected/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-kube-api-access-kfkf8\") pod \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") "
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.767902 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-utilities\") pod \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") "
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.767948 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-catalog-content\") pod \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\" (UID: \"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d\") "
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.770628 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-utilities" (OuterVolumeSpecName: "utilities") pod "a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" (UID: "a92ec5c4-9655-49fa-84c8-c26e52e9fa0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.773589 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-kube-api-access-kfkf8" (OuterVolumeSpecName: "kube-api-access-kfkf8") pod "a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" (UID: "a92ec5c4-9655-49fa-84c8-c26e52e9fa0d"). InnerVolumeSpecName "kube-api-access-kfkf8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.811080 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" (UID: "a92ec5c4-9655-49fa-84c8-c26e52e9fa0d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.869900 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfkf8\" (UniqueName: \"kubernetes.io/projected/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-kube-api-access-kfkf8\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.869931 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:42 crc kubenswrapper[4940]: I1126 09:08:42.869944 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.038216 4940 generic.go:334] "Generic (PLEG): container finished" podID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerID="4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177" exitCode=0
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.038313 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-td42b"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.038301 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-td42b" event={"ID":"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d","Type":"ContainerDied","Data":"4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177"}
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.038563 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-td42b" event={"ID":"a92ec5c4-9655-49fa-84c8-c26e52e9fa0d","Type":"ContainerDied","Data":"293056f8381a44983a765bf4f46e448a83496ab810bf87778f21d35459e1dab4"}
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.038609 4940 scope.go:117] "RemoveContainer" containerID="4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.070675 4940 scope.go:117] "RemoveContainer" containerID="ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.121682 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-td42b"]
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.133092 4940 scope.go:117] "RemoveContainer" containerID="21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.151447 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-td42b"]
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.172812 4940 scope.go:117] "RemoveContainer" containerID="4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177"
Nov 26 09:08:43 crc kubenswrapper[4940]: E1126 09:08:43.177819 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177\": container with ID starting with 4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177 not found: ID does not exist" containerID="4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.177911 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177"} err="failed to get container status \"4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177\": rpc error: code = NotFound desc = could not find container \"4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177\": container with ID starting with 4709edad97be83b46f6ccd06e164d59bb85178a1e669aff99ad84a8bdaf81177 not found: ID does not exist"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.177968 4940 scope.go:117] "RemoveContainer" containerID="ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88"
Nov 26 09:08:43 crc kubenswrapper[4940]: E1126 09:08:43.178825 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88\": container with ID starting with ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88 not found: ID does not exist" containerID="ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.178892 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88"} err="failed to get container status \"ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88\": rpc error: code = NotFound desc = could not find container \"ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88\": container with ID starting with ee20916ae51cc3d3fb6a6e9a4ee0e74705d6eba288c9d1734ca3e059c4cf2e88 not found: ID does not exist"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.178939 4940 scope.go:117] "RemoveContainer" containerID="21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634"
Nov 26 09:08:43 crc kubenswrapper[4940]: E1126 09:08:43.181885 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634\": container with ID starting with 21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634 not found: ID does not exist" containerID="21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.181941 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634"} err="failed to get container status \"21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634\": rpc error: code = NotFound desc = could not find container \"21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634\": container with ID starting with 21812f3a07c95dd41b91b1ccacd3c75accf239da1625a503749a27b9d6896634 not found: ID does not exist"
Nov 26 09:08:43 crc kubenswrapper[4940]: I1126 09:08:43.199878 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" path="/var/lib/kubelet/pods/a92ec5c4-9655-49fa-84c8-c26e52e9fa0d/volumes"
Nov 26 09:08:47 crc kubenswrapper[4940]: I1126 09:08:47.030486 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-5jzzk"]
Nov 26 09:08:47 crc kubenswrapper[4940]: I1126 09:08:47.041583 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-5jzzk"]
Nov 26 09:08:47 crc kubenswrapper[4940]: I1126 09:08:47.177757 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce" path="/var/lib/kubelet/pods/36ea1c73-6551-4fbe-a6a0-7af1bd8cbfce/volumes"
Nov 26 09:08:51 crc kubenswrapper[4940]: I1126 09:08:51.727969 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:08:51 crc kubenswrapper[4940]: I1126 09:08:51.728490 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:09:15 crc kubenswrapper[4940]: I1126 09:09:15.793001 4940 scope.go:117] "RemoveContainer" containerID="91d0469357e39e349bd300b71a5b64621f84ced0690d71a325fc889eb7dcace6"
Nov 26 09:09:15 crc kubenswrapper[4940]: I1126 09:09:15.842923 4940 scope.go:117] "RemoveContainer" containerID="91727520c673b658e892a75735af20c2f64b87bc61fefaa5d9db70480e72e25d"
Nov 26 09:09:15 crc kubenswrapper[4940]: I1126 09:09:15.901984 4940 scope.go:117] "RemoveContainer" containerID="5d2ee2cab527127cfc2ce78991953831c352791e9763cb4611646ec99f2dfc80"
Nov 26 09:09:16 crc kubenswrapper[4940]: I1126 09:09:16.058171 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-bb8e-account-create-update-rxrfg"]
Nov 26 09:09:16 crc kubenswrapper[4940]: I1126 09:09:16.071134 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-8ww7r"]
Nov 26 09:09:16 crc kubenswrapper[4940]: I1126 09:09:16.079603 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-8ww7r"]
Nov 26 09:09:16 crc kubenswrapper[4940]: I1126 09:09:16.087475 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-bb8e-account-create-update-rxrfg"]
Nov 26 09:09:17 crc kubenswrapper[4940]: I1126 09:09:17.180992 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90a9767e-6575-42eb-a5fd-15b4b24ce12e" path="/var/lib/kubelet/pods/90a9767e-6575-42eb-a5fd-15b4b24ce12e/volumes"
Nov 26 09:09:17 crc kubenswrapper[4940]: I1126 09:09:17.182195 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a83f6edc-021b-4530-a459-13a1cacd0e9b" path="/var/lib/kubelet/pods/a83f6edc-021b-4530-a459-13a1cacd0e9b/volumes"
Nov 26 09:09:21 crc kubenswrapper[4940]: I1126 09:09:21.728762 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:09:21 crc kubenswrapper[4940]: I1126 09:09:21.729412 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:09:30 crc kubenswrapper[4940]: I1126 09:09:30.032417 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-ddsss"]
Nov 26 09:09:30 crc kubenswrapper[4940]: I1126 09:09:30.042873 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-ddsss"]
Nov 26 09:09:31 crc kubenswrapper[4940]: I1126 09:09:31.184787 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9669e1c-3fe0-49e5-bf17-012bf16e9944" path="/var/lib/kubelet/pods/f9669e1c-3fe0-49e5-bf17-012bf16e9944/volumes"
Nov 26 09:09:51 crc kubenswrapper[4940]: I1126 09:09:51.727982 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:09:51 crc kubenswrapper[4940]: I1126 09:09:51.728562 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:09:51 crc kubenswrapper[4940]: I1126 09:09:51.728613 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 09:09:51 crc kubenswrapper[4940]: I1126 09:09:51.729385 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 09:09:51 crc kubenswrapper[4940]: I1126 09:09:51.729439 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" gracePeriod=600
Nov 26 09:09:51 crc kubenswrapper[4940]: E1126 09:09:51.858629 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:09:52 crc kubenswrapper[4940]: I1126 09:09:52.825474 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" exitCode=0
Nov 26 09:09:52 crc kubenswrapper[4940]: I1126 09:09:52.825526 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"}
Nov 26 09:09:52 crc kubenswrapper[4940]: I1126 09:09:52.825596 4940 scope.go:117] "RemoveContainer" containerID="16092f890c735374f1a9b2f29a12626df1870cc7956ac1999da2f0e61b27af07"
Nov 26 09:09:52 crc kubenswrapper[4940]: I1126 09:09:52.826540 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:09:52 crc kubenswrapper[4940]: E1126 09:09:52.827363 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:10:04 crc kubenswrapper[4940]: I1126 09:10:04.165568 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:10:04 crc kubenswrapper[4940]: E1126 09:10:04.166264 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.579508 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lzccw"]
Nov 26 09:10:12 crc kubenswrapper[4940]: E1126 09:10:12.580438 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerName="extract-content"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.580450 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerName="extract-content"
Nov 26 09:10:12 crc kubenswrapper[4940]: E1126 09:10:12.580463 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerName="registry-server"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.580469 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerName="registry-server"
Nov 26 09:10:12 crc kubenswrapper[4940]: E1126 09:10:12.580485 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerName="extract-utilities"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.580491 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerName="extract-utilities"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.580670 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a92ec5c4-9655-49fa-84c8-c26e52e9fa0d" containerName="registry-server"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.582227 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.597608 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lzccw"]
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.725465 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-catalog-content\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.725552 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfl2l\" (UniqueName: \"kubernetes.io/projected/77b0d1be-b485-461f-9786-d2e3a9b0759d-kube-api-access-mfl2l\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.725686 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-utilities\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.827672 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfl2l\" (UniqueName: \"kubernetes.io/projected/77b0d1be-b485-461f-9786-d2e3a9b0759d-kube-api-access-mfl2l\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.827824 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-utilities\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.827887 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-catalog-content\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.828418 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-utilities\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.828476 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-catalog-content\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.856998 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfl2l\" (UniqueName: \"kubernetes.io/projected/77b0d1be-b485-461f-9786-d2e3a9b0759d-kube-api-access-mfl2l\") pod \"community-operators-lzccw\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") " pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:12 crc kubenswrapper[4940]: I1126 09:10:12.913454 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:13 crc kubenswrapper[4940]: I1126 09:10:13.504100 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lzccw"]
Nov 26 09:10:14 crc kubenswrapper[4940]: I1126 09:10:14.074410 4940 generic.go:334] "Generic (PLEG): container finished" podID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerID="516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718" exitCode=0
Nov 26 09:10:14 crc kubenswrapper[4940]: I1126 09:10:14.074491 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzccw" event={"ID":"77b0d1be-b485-461f-9786-d2e3a9b0759d","Type":"ContainerDied","Data":"516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718"}
Nov 26 09:10:14 crc kubenswrapper[4940]: I1126 09:10:14.074698 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzccw" event={"ID":"77b0d1be-b485-461f-9786-d2e3a9b0759d","Type":"ContainerStarted","Data":"1db876a775681bde3ab55367e704e03e5d42d07f4e1ca92b2cf9cad5487932ec"}
Nov 26 09:10:14 crc kubenswrapper[4940]: I1126 09:10:14.076774 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 09:10:15 crc kubenswrapper[4940]: I1126 09:10:15.087957 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzccw" event={"ID":"77b0d1be-b485-461f-9786-d2e3a9b0759d","Type":"ContainerStarted","Data":"5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4"}
Nov 26 09:10:15 crc kubenswrapper[4940]: I1126 09:10:15.166207 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:10:15 crc kubenswrapper[4940]: E1126 09:10:15.166474 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:10:16 crc kubenswrapper[4940]: I1126 09:10:16.087205 4940 scope.go:117] "RemoveContainer" containerID="7156b95c49d84fa1b4c6984d73e01fb28a808dd8197d59bbd97be99d5590b5cb"
Nov 26 09:10:16 crc kubenswrapper[4940]: I1126 09:10:16.105520 4940 generic.go:334] "Generic (PLEG): container finished" podID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerID="5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4" exitCode=0
Nov 26 09:10:16 crc kubenswrapper[4940]: I1126 09:10:16.105573 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzccw" event={"ID":"77b0d1be-b485-461f-9786-d2e3a9b0759d","Type":"ContainerDied","Data":"5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4"}
Nov 26 09:10:16 crc kubenswrapper[4940]: I1126 09:10:16.138239 4940 scope.go:117] "RemoveContainer" containerID="39aa83f6c8d4075b792cc1f0e9c2c9955a870f9f1c9e38e020d6dbc8800ee30c"
Nov 26 09:10:16 crc kubenswrapper[4940]: I1126 09:10:16.167112 4940 scope.go:117] "RemoveContainer" containerID="6474e80085ddce58dc6f946c50ba1c520fded4a4ae03eca65358f56b4c38f263"
Nov 26 09:10:17 crc kubenswrapper[4940]: I1126 09:10:17.116401 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzccw" event={"ID":"77b0d1be-b485-461f-9786-d2e3a9b0759d","Type":"ContainerStarted","Data":"5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a"}
Nov 26 09:10:17 crc kubenswrapper[4940]: I1126 09:10:17.138129 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lzccw" podStartSLOduration=2.327298577 podStartE2EDuration="5.138113757s" podCreationTimestamp="2025-11-26 09:10:12 +0000 UTC" firstStartedPulling="2025-11-26 09:10:14.076514755 +0000 UTC m=+8115.596656374" lastFinishedPulling="2025-11-26 09:10:16.887329945 +0000 UTC m=+8118.407471554" observedRunningTime="2025-11-26 09:10:17.133412938 +0000 UTC m=+8118.653554587" watchObservedRunningTime="2025-11-26 09:10:17.138113757 +0000 UTC m=+8118.658255366"
Nov 26 09:10:22 crc kubenswrapper[4940]: I1126 09:10:22.913817 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:22 crc kubenswrapper[4940]: I1126 09:10:22.914388 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:22 crc kubenswrapper[4940]: I1126 09:10:22.972058 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:23 crc kubenswrapper[4940]: I1126 09:10:23.218792 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:23 crc kubenswrapper[4940]: I1126 09:10:23.296054 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lzccw"]
Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.189937 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lzccw" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerName="registry-server" containerID="cri-o://5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a" gracePeriod=2
Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.702390 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lzccw"
Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.821862 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfl2l\" (UniqueName: \"kubernetes.io/projected/77b0d1be-b485-461f-9786-d2e3a9b0759d-kube-api-access-mfl2l\") pod \"77b0d1be-b485-461f-9786-d2e3a9b0759d\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") "
Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.822250 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-utilities\") pod \"77b0d1be-b485-461f-9786-d2e3a9b0759d\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") "
Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.822512 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-catalog-content\") pod \"77b0d1be-b485-461f-9786-d2e3a9b0759d\" (UID: \"77b0d1be-b485-461f-9786-d2e3a9b0759d\") "
Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.823021 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-utilities" (OuterVolumeSpecName: "utilities") pod "77b0d1be-b485-461f-9786-d2e3a9b0759d" (UID: "77b0d1be-b485-461f-9786-d2e3a9b0759d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.830289 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77b0d1be-b485-461f-9786-d2e3a9b0759d-kube-api-access-mfl2l" (OuterVolumeSpecName: "kube-api-access-mfl2l") pod "77b0d1be-b485-461f-9786-d2e3a9b0759d" (UID: "77b0d1be-b485-461f-9786-d2e3a9b0759d"). InnerVolumeSpecName "kube-api-access-mfl2l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.894181 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77b0d1be-b485-461f-9786-d2e3a9b0759d" (UID: "77b0d1be-b485-461f-9786-d2e3a9b0759d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.924687 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.924720 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfl2l\" (UniqueName: \"kubernetes.io/projected/77b0d1be-b485-461f-9786-d2e3a9b0759d-kube-api-access-mfl2l\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:25 crc kubenswrapper[4940]: I1126 09:10:25.924731 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b0d1be-b485-461f-9786-d2e3a9b0759d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.165945 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" Nov 26 09:10:26 crc kubenswrapper[4940]: E1126 09:10:26.166295 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.213780 4940 generic.go:334] "Generic (PLEG): container finished" podID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerID="5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a" exitCode=0 Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.213837 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzccw" event={"ID":"77b0d1be-b485-461f-9786-d2e3a9b0759d","Type":"ContainerDied","Data":"5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a"} Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.213872 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lzccw" event={"ID":"77b0d1be-b485-461f-9786-d2e3a9b0759d","Type":"ContainerDied","Data":"1db876a775681bde3ab55367e704e03e5d42d07f4e1ca92b2cf9cad5487932ec"} Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.213898 4940 scope.go:117] "RemoveContainer" containerID="5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.214172 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lzccw" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.242253 4940 scope.go:117] "RemoveContainer" containerID="5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.256182 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lzccw"] Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.267293 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lzccw"] Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.286593 4940 scope.go:117] "RemoveContainer" containerID="516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.327298 4940 scope.go:117] "RemoveContainer" containerID="5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a" Nov 26 09:10:26 crc kubenswrapper[4940]: E1126 09:10:26.327825 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a\": container with ID starting with 5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a not found: ID does not exist" containerID="5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.327956 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a"} err="failed to get container status \"5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a\": rpc error: code = NotFound desc = could not find container \"5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a\": container with ID starting with 5457fb09a5e714d5f2482778b72f18aeae1f85dc9932c41cb9979cf16f5d376a not found: ID does not exist" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.328075 4940 scope.go:117] "RemoveContainer" containerID="5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4" Nov 26 09:10:26 crc kubenswrapper[4940]: E1126 09:10:26.328714 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4\": container with ID starting with 5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4 not found: ID does not exist" containerID="5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.328748 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4"} err="failed to get container status \"5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4\": rpc error: code = NotFound desc = could not find container \"5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4\": container with ID starting with 5cd01f5feb76c170578f44b7f0b574f0330eee76ecf2b7d006f8e1cae9dcb4b4 not found: ID does not exist" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.328772 4940 scope.go:117] "RemoveContainer" containerID="516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718" Nov 26 09:10:26 crc kubenswrapper[4940]: E1126 09:10:26.329141 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718\": container with ID starting with 516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718 not found: ID does not exist" containerID="516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718" Nov 26 09:10:26 crc kubenswrapper[4940]: I1126 09:10:26.329247 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718"} err="failed to get container status \"516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718\": rpc error: code = NotFound desc = could not find container \"516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718\": container with ID starting with 516bf02cb5224fe5678013ebf3a2ac0e53378a036c5114898476dcb1d47b4718 not found: ID does not exist" Nov 26 09:10:27 crc kubenswrapper[4940]: I1126 09:10:27.184484 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" path="/var/lib/kubelet/pods/77b0d1be-b485-461f-9786-d2e3a9b0759d/volumes" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.224783 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bzvhc"] Nov 26 09:10:28 crc kubenswrapper[4940]: E1126 09:10:28.225403 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerName="registry-server" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.225421 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerName="registry-server" Nov 26 09:10:28 crc kubenswrapper[4940]: E1126 09:10:28.225431 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerName="extract-content" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.225437 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerName="extract-content" Nov 26 09:10:28 crc kubenswrapper[4940]: E1126 09:10:28.225455 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerName="extract-utilities" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.225461 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerName="extract-utilities" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.225726 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="77b0d1be-b485-461f-9786-d2e3a9b0759d" containerName="registry-server" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.228631 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.239731 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bzvhc"] Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.275387 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdgkr\" (UniqueName: \"kubernetes.io/projected/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-kube-api-access-cdgkr\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.275515 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-catalog-content\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.275643 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-utilities\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.377334 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-utilities\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.377506 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdgkr\" (UniqueName: \"kubernetes.io/projected/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-kube-api-access-cdgkr\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.377605 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-catalog-content\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.378240 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-catalog-content\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.378245 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-utilities\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.403364 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-cdgkr\" (UniqueName: \"kubernetes.io/projected/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-kube-api-access-cdgkr\") pod \"redhat-operators-bzvhc\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:28 crc kubenswrapper[4940]: I1126 09:10:28.548129 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:29 crc kubenswrapper[4940]: I1126 09:10:29.023515 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bzvhc"] Nov 26 09:10:29 crc kubenswrapper[4940]: I1126 09:10:29.244946 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzvhc" event={"ID":"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6","Type":"ContainerStarted","Data":"b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b"} Nov 26 09:10:29 crc kubenswrapper[4940]: I1126 09:10:29.244998 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzvhc" event={"ID":"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6","Type":"ContainerStarted","Data":"df91123f835143ccfdb866b15fb6b657e8207e650862892a71025b701d4f3881"} Nov 26 09:10:30 crc kubenswrapper[4940]: I1126 09:10:30.264472 4940 generic.go:334] "Generic (PLEG): container finished" podID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerID="b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b" exitCode=0 Nov 26 09:10:30 crc kubenswrapper[4940]: I1126 09:10:30.264833 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzvhc" event={"ID":"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6","Type":"ContainerDied","Data":"b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b"} Nov 26 09:10:32 crc kubenswrapper[4940]: I1126 09:10:32.287775 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzvhc" event={"ID":"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6","Type":"ContainerStarted","Data":"6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7"} Nov 26 09:10:33 crc kubenswrapper[4940]: I1126 09:10:33.300273 4940 generic.go:334] "Generic (PLEG): container finished" podID="47b63e78-a365-4aef-85a8-4ecd8fb825a8" containerID="3978f35fe15e83e7b3d6e097f7bc79d566807d46c0badd32563f52127e12f6a4" exitCode=0 Nov 26 09:10:33 crc kubenswrapper[4940]: I1126 09:10:33.300343 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" event={"ID":"47b63e78-a365-4aef-85a8-4ecd8fb825a8","Type":"ContainerDied","Data":"3978f35fe15e83e7b3d6e097f7bc79d566807d46c0badd32563f52127e12f6a4"} Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.779097 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.928331 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ceph\") pod \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.928501 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcnb5\" (UniqueName: \"kubernetes.io/projected/47b63e78-a365-4aef-85a8-4ecd8fb825a8-kube-api-access-jcnb5\") pod \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.928614 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-inventory\") pod \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.928667 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ssh-key\") pod \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.928688 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-tripleo-cleanup-combined-ca-bundle\") pod \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\" (UID: \"47b63e78-a365-4aef-85a8-4ecd8fb825a8\") " Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.934988 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ceph" (OuterVolumeSpecName: "ceph") pod "47b63e78-a365-4aef-85a8-4ecd8fb825a8" (UID: "47b63e78-a365-4aef-85a8-4ecd8fb825a8"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.935376 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47b63e78-a365-4aef-85a8-4ecd8fb825a8-kube-api-access-jcnb5" (OuterVolumeSpecName: "kube-api-access-jcnb5") pod "47b63e78-a365-4aef-85a8-4ecd8fb825a8" (UID: "47b63e78-a365-4aef-85a8-4ecd8fb825a8"). InnerVolumeSpecName "kube-api-access-jcnb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.937420 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "47b63e78-a365-4aef-85a8-4ecd8fb825a8" (UID: "47b63e78-a365-4aef-85a8-4ecd8fb825a8"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.961135 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "47b63e78-a365-4aef-85a8-4ecd8fb825a8" (UID: "47b63e78-a365-4aef-85a8-4ecd8fb825a8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:10:34 crc kubenswrapper[4940]: I1126 09:10:34.984393 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-inventory" (OuterVolumeSpecName: "inventory") pod "47b63e78-a365-4aef-85a8-4ecd8fb825a8" (UID: "47b63e78-a365-4aef-85a8-4ecd8fb825a8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:10:35 crc kubenswrapper[4940]: I1126 09:10:35.031052 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:35 crc kubenswrapper[4940]: I1126 09:10:35.031080 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:35 crc kubenswrapper[4940]: I1126 09:10:35.031091 4940 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:35 crc kubenswrapper[4940]: I1126 09:10:35.031103 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/47b63e78-a365-4aef-85a8-4ecd8fb825a8-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:35 crc kubenswrapper[4940]: I1126 09:10:35.031113 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcnb5\" (UniqueName: \"kubernetes.io/projected/47b63e78-a365-4aef-85a8-4ecd8fb825a8-kube-api-access-jcnb5\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:35 crc kubenswrapper[4940]: I1126 09:10:35.331413 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" event={"ID":"47b63e78-a365-4aef-85a8-4ecd8fb825a8","Type":"ContainerDied","Data":"14396640000c4c3571f3c65124c9bad49c58e11e2abc5460a950f5b05152f30b"} Nov 26 09:10:35 crc kubenswrapper[4940]: I1126 09:10:35.338404 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14396640000c4c3571f3c65124c9bad49c58e11e2abc5460a950f5b05152f30b" Nov 26 09:10:35 crc kubenswrapper[4940]: I1126 09:10:35.331865 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2" Nov 26 09:10:36 crc kubenswrapper[4940]: I1126 09:10:36.347093 4940 generic.go:334] "Generic (PLEG): container finished" podID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerID="6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7" exitCode=0 Nov 26 09:10:36 crc kubenswrapper[4940]: I1126 09:10:36.347148 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzvhc" event={"ID":"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6","Type":"ContainerDied","Data":"6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7"} Nov 26 09:10:37 crc kubenswrapper[4940]: I1126 09:10:37.166145 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" Nov 26 09:10:37 crc kubenswrapper[4940]: E1126 09:10:37.166580 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:10:37 crc kubenswrapper[4940]: I1126 09:10:37.361530 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzvhc" event={"ID":"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6","Type":"ContainerStarted","Data":"d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd"} Nov 26 09:10:37 crc kubenswrapper[4940]: I1126 09:10:37.395820 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bzvhc" podStartSLOduration=2.644936838 podStartE2EDuration="9.395799296s" podCreationTimestamp="2025-11-26 09:10:28 +0000 UTC" firstStartedPulling="2025-11-26 09:10:30.266944072 +0000 UTC m=+8131.787085691" lastFinishedPulling="2025-11-26 09:10:37.01780653 +0000 UTC m=+8138.537948149" observedRunningTime="2025-11-26 09:10:37.386632645 +0000 UTC m=+8138.906774284" watchObservedRunningTime="2025-11-26 09:10:37.395799296 +0000 UTC m=+8138.915940915" Nov 26 09:10:38 crc kubenswrapper[4940]: I1126 09:10:38.548590 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:38 crc kubenswrapper[4940]: I1126 09:10:38.549090 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:39 crc kubenswrapper[4940]: I1126 09:10:39.609684 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bzvhc" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="registry-server" probeResult="failure" output=< Nov 26 09:10:39 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 09:10:39 crc kubenswrapper[4940]: > Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.371857 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-jrhf2"] Nov 26 09:10:44 crc kubenswrapper[4940]: E1126 09:10:44.372908 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47b63e78-a365-4aef-85a8-4ecd8fb825a8" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 
09:10:44.372928 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="47b63e78-a365-4aef-85a8-4ecd8fb825a8" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.373269 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="47b63e78-a365-4aef-85a8-4ecd8fb825a8" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.374268 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.377337 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.378368 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.378484 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.379518 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.383715 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-jrhf2"] Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.424808 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-networker-rn57g"] Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.435152 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.437929 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-networker-rn57g"] Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.439278 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.440181 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544086 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544151 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544180 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-ssh-key\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544221 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m6td\" (UniqueName: \"kubernetes.io/projected/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-kube-api-access-8m6td\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544295 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj6nx\" (UniqueName: \"kubernetes.io/projected/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-kube-api-access-nj6nx\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544369 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544421 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ceph\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544445 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-inventory\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.544471 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-inventory\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.645783 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.645841 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-ssh-key\") pod \"bootstrap-openstack-openstack-networker-rn57g\" 
(UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.645876 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m6td\" (UniqueName: \"kubernetes.io/projected/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-kube-api-access-8m6td\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.645944 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj6nx\" (UniqueName: \"kubernetes.io/projected/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-kube-api-access-nj6nx\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.645966 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.646000 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ceph\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.646020 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-inventory\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.646059 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-inventory\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.646166 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.652645 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-inventory\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.652679 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ceph\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.653149 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-ssh-key\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.653840 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.654407 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.655920 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.656500 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-inventory\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.665928 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8m6td\" (UniqueName: \"kubernetes.io/projected/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-kube-api-access-8m6td\") pod \"bootstrap-openstack-openstack-cell1-jrhf2\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") " pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.666483 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj6nx\" (UniqueName: \"kubernetes.io/projected/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-kube-api-access-nj6nx\") pod \"bootstrap-openstack-openstack-networker-rn57g\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") " pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.719776 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" Nov 26 09:10:44 crc kubenswrapper[4940]: I1126 09:10:44.758449 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-networker-rn57g" Nov 26 09:10:45 crc kubenswrapper[4940]: W1126 09:10:45.284223 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podded13a4c_1e1b_4ddc_a6a4_33f15ec88fa4.slice/crio-7b5528a1751da6995d849877e0cb05922afbb6a42b53481c2223de81ae3c8dcc WatchSource:0}: Error finding container 7b5528a1751da6995d849877e0cb05922afbb6a42b53481c2223de81ae3c8dcc: Status 404 returned error can't find the container with id 7b5528a1751da6995d849877e0cb05922afbb6a42b53481c2223de81ae3c8dcc Nov 26 09:10:45 crc kubenswrapper[4940]: I1126 09:10:45.286090 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-jrhf2"] Nov 26 09:10:45 crc kubenswrapper[4940]: I1126 09:10:45.394127 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-networker-rn57g"] Nov 26 09:10:45 crc kubenswrapper[4940]: W1126 09:10:45.395661 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d78b5da_8b3a_43fa_a3c0_dbdce05784a7.slice/crio-2ea27ffe5e21e3ae3225ac16e3a2d9a685d3e952a3f3a0e9f7763edaa9381078 WatchSource:0}: Error finding container 2ea27ffe5e21e3ae3225ac16e3a2d9a685d3e952a3f3a0e9f7763edaa9381078: Status 404 returned error can't find the container with id 2ea27ffe5e21e3ae3225ac16e3a2d9a685d3e952a3f3a0e9f7763edaa9381078 Nov 26 09:10:45 crc kubenswrapper[4940]: I1126 09:10:45.456593 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" event={"ID":"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4","Type":"ContainerStarted","Data":"7b5528a1751da6995d849877e0cb05922afbb6a42b53481c2223de81ae3c8dcc"} Nov 26 09:10:45 crc kubenswrapper[4940]: I1126 09:10:45.458188 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-networker-rn57g" event={"ID":"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7","Type":"ContainerStarted","Data":"2ea27ffe5e21e3ae3225ac16e3a2d9a685d3e952a3f3a0e9f7763edaa9381078"} Nov 26 09:10:47 crc kubenswrapper[4940]: I1126 09:10:47.496189 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" event={"ID":"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4","Type":"ContainerStarted","Data":"b471f93cb2b6bdbfb3e851664a8989751aae2b82049f980d794e6dd5e22db48c"} Nov 26 09:10:47 crc kubenswrapper[4940]: I1126 09:10:47.498500 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-networker-rn57g" event={"ID":"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7","Type":"ContainerStarted","Data":"27e9a3d115fab2a2e2794b7d81f2211ba337eb64c8ab24433e6f3b910fe49942"} Nov 26 09:10:47 crc kubenswrapper[4940]: I1126 09:10:47.517671 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" podStartSLOduration=1.7280498610000001 podStartE2EDuration="3.517653975s" podCreationTimestamp="2025-11-26 09:10:44 +0000 UTC" firstStartedPulling="2025-11-26 09:10:45.28735321 +0000 UTC m=+8146.807494829" lastFinishedPulling="2025-11-26 09:10:47.076957284 +0000 UTC m=+8148.597098943" observedRunningTime="2025-11-26 09:10:47.510001751 +0000 UTC m=+8149.030143370" watchObservedRunningTime="2025-11-26 09:10:47.517653975 +0000 UTC m=+8149.037795594" Nov 26 09:10:47 crc kubenswrapper[4940]: I1126 09:10:47.543382 
4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-networker-rn57g" podStartSLOduration=2.506130188 podStartE2EDuration="3.543359922s" podCreationTimestamp="2025-11-26 09:10:44 +0000 UTC" firstStartedPulling="2025-11-26 09:10:45.398258317 +0000 UTC m=+8146.918399936" lastFinishedPulling="2025-11-26 09:10:46.435488041 +0000 UTC m=+8147.955629670" observedRunningTime="2025-11-26 09:10:47.526877188 +0000 UTC m=+8149.047018837" watchObservedRunningTime="2025-11-26 09:10:47.543359922 +0000 UTC m=+8149.063501571" Nov 26 09:10:48 crc kubenswrapper[4940]: I1126 09:10:48.612774 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:48 crc kubenswrapper[4940]: I1126 09:10:48.670596 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:48 crc kubenswrapper[4940]: I1126 09:10:48.852436 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bzvhc"] Nov 26 09:10:50 crc kubenswrapper[4940]: I1126 09:10:50.165665 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" Nov 26 09:10:50 crc kubenswrapper[4940]: E1126 09:10:50.165979 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:10:50 crc kubenswrapper[4940]: I1126 09:10:50.570395 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bzvhc" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="registry-server" containerID="cri-o://d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd" gracePeriod=2 Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.122424 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.305367 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdgkr\" (UniqueName: \"kubernetes.io/projected/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-kube-api-access-cdgkr\") pod \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.305513 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-utilities\") pod \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.305554 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-catalog-content\") pod \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\" (UID: \"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6\") " Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.308276 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-utilities" (OuterVolumeSpecName: "utilities") pod "4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" (UID: "4171d2d5-ce10-4eee-a68f-22e61ef5e9f6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.312236 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-kube-api-access-cdgkr" (OuterVolumeSpecName: "kube-api-access-cdgkr") pod "4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" (UID: "4171d2d5-ce10-4eee-a68f-22e61ef5e9f6"). InnerVolumeSpecName "kube-api-access-cdgkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.407146 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.407184 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdgkr\" (UniqueName: \"kubernetes.io/projected/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-kube-api-access-cdgkr\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.418227 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" (UID: "4171d2d5-ce10-4eee-a68f-22e61ef5e9f6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.509575 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.582691 4940 generic.go:334] "Generic (PLEG): container finished" podID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerID="d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd" exitCode=0 Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.582742 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzvhc" event={"ID":"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6","Type":"ContainerDied","Data":"d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd"} Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.582774 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzvhc" event={"ID":"4171d2d5-ce10-4eee-a68f-22e61ef5e9f6","Type":"ContainerDied","Data":"df91123f835143ccfdb866b15fb6b657e8207e650862892a71025b701d4f3881"} Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.582795 4940 scope.go:117] "RemoveContainer" containerID="d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.582954 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzvhc" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.627968 4940 scope.go:117] "RemoveContainer" containerID="6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.636967 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bzvhc"] Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.650026 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bzvhc"] Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.668593 4940 scope.go:117] "RemoveContainer" containerID="b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.727474 4940 scope.go:117] "RemoveContainer" containerID="d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd" Nov 26 09:10:51 crc kubenswrapper[4940]: E1126 09:10:51.729482 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd\": container with ID starting with d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd not found: ID does not exist" containerID="d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.729525 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd"} err="failed to get container status \"d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd\": rpc error: code = NotFound desc = could not find container \"d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd\": container with ID starting with d533cbcf9692bb53837a508c616fbf355c27cde12ec48f3d9533e12c61e675fd not found: ID does not exist" Nov 26 09:10:51 crc 
kubenswrapper[4940]: I1126 09:10:51.729552 4940 scope.go:117] "RemoveContainer" containerID="6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7" Nov 26 09:10:51 crc kubenswrapper[4940]: E1126 09:10:51.730207 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7\": container with ID starting with 6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7 not found: ID does not exist" containerID="6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.730228 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7"} err="failed to get container status \"6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7\": rpc error: code = NotFound desc = could not find container \"6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7\": container with ID starting with 6ddd97e5e3338b61bc3be3896998de9db240230ebaaa2df046288c7ca8a7d2c7 not found: ID does not exist" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.730240 4940 scope.go:117] "RemoveContainer" containerID="b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b" Nov 26 09:10:51 crc kubenswrapper[4940]: E1126 09:10:51.730516 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b\": container with ID starting with b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b not found: ID does not exist" containerID="b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b" Nov 26 09:10:51 crc kubenswrapper[4940]: I1126 09:10:51.730536 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b"} err="failed to get container status \"b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b\": rpc error: code = NotFound desc = could not find container \"b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b\": container with ID starting with b3119d4793fcd5bfd29366242e65600ad6b087a1d5c9564fc8cd76ecde17d03b not found: ID does not exist" Nov 26 09:10:53 crc kubenswrapper[4940]: I1126 09:10:53.182078 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" path="/var/lib/kubelet/pods/4171d2d5-ce10-4eee-a68f-22e61ef5e9f6/volumes" Nov 26 09:11:05 crc kubenswrapper[4940]: I1126 09:11:05.166355 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" Nov 26 09:11:05 crc kubenswrapper[4940]: E1126 09:11:05.167130 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:11:20 crc kubenswrapper[4940]: I1126 09:11:20.166245 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" 
Nov 26 09:11:20 crc kubenswrapper[4940]: E1126 09:11:20.167272 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:11:34 crc kubenswrapper[4940]: I1126 09:11:34.180618 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:11:34 crc kubenswrapper[4940]: E1126 09:11:34.183448 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:11:47 crc kubenswrapper[4940]: I1126 09:11:47.165987 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:11:47 crc kubenswrapper[4940]: E1126 09:11:47.166969 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:12:02 crc kubenswrapper[4940]: I1126 09:12:02.165362 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:12:02 crc kubenswrapper[4940]: E1126 09:12:02.167028 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:12:13 crc kubenswrapper[4940]: I1126 09:12:13.166488 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:12:13 crc kubenswrapper[4940]: E1126 09:12:13.167375 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:12:25 crc kubenswrapper[4940]: I1126 09:12:25.165850 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:12:25 crc kubenswrapper[4940]: E1126 09:12:25.166741 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:12:36 crc kubenswrapper[4940]: I1126 09:12:36.165980 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:12:36 crc kubenswrapper[4940]: E1126 09:12:36.166835 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:12:49 crc kubenswrapper[4940]: I1126 09:12:49.176560 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:12:49 crc kubenswrapper[4940]: E1126 09:12:49.177289 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:13:03 crc kubenswrapper[4940]: I1126 09:13:03.166672 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:13:03 crc kubenswrapper[4940]: E1126 09:13:03.167812 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:13:18 crc kubenswrapper[4940]: I1126 09:13:18.165765 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:13:18 crc kubenswrapper[4940]: E1126 09:13:18.166527 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:13:33 crc kubenswrapper[4940]: I1126 09:13:33.165890 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:13:33 crc kubenswrapper[4940]: E1126 09:13:33.166838 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:13:43 crc kubenswrapper[4940]: I1126 09:13:43.896436 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" event={"ID":"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4","Type":"ContainerDied","Data":"b471f93cb2b6bdbfb3e851664a8989751aae2b82049f980d794e6dd5e22db48c"}
Nov 26 09:13:43 crc kubenswrapper[4940]: I1126 09:13:43.896509 4940 generic.go:334] "Generic (PLEG): container finished" podID="ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" containerID="b471f93cb2b6bdbfb3e851664a8989751aae2b82049f980d794e6dd5e22db48c" exitCode=0
Nov 26 09:13:44 crc kubenswrapper[4940]: I1126 09:13:44.908925 4940 generic.go:334] "Generic (PLEG): container finished" podID="8d78b5da-8b3a-43fa-a3c0-dbdce05784a7" containerID="27e9a3d115fab2a2e2794b7d81f2211ba337eb64c8ab24433e6f3b910fe49942" exitCode=0
Nov 26 09:13:44 crc kubenswrapper[4940]: I1126 09:13:44.909013 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-networker-rn57g" event={"ID":"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7","Type":"ContainerDied","Data":"27e9a3d115fab2a2e2794b7d81f2211ba337eb64c8ab24433e6f3b910fe49942"}
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.474358 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2"
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.604450 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-bootstrap-combined-ca-bundle\") pod \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") "
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.604628 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ceph\") pod \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") "
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.604695 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8m6td\" (UniqueName: \"kubernetes.io/projected/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-kube-api-access-8m6td\") pod \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") "
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.604746 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ssh-key\") pod \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") "
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.604946 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-inventory\") pod \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\" (UID: \"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4\") "
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.612414 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-kube-api-access-8m6td" (OuterVolumeSpecName: "kube-api-access-8m6td") pod "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" (UID: "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4"). InnerVolumeSpecName "kube-api-access-8m6td". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.612563 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ceph" (OuterVolumeSpecName: "ceph") pod "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" (UID: "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.616296 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" (UID: "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.639899 4940 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.639953 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ceph\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.639971 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8m6td\" (UniqueName: \"kubernetes.io/projected/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-kube-api-access-8m6td\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.649611 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" (UID: "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.666293 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-inventory" (OuterVolumeSpecName: "inventory") pod "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" (UID: "ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.742407 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.742471 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.927093 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2" event={"ID":"ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4","Type":"ContainerDied","Data":"7b5528a1751da6995d849877e0cb05922afbb6a42b53481c2223de81ae3c8dcc"}
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.927171 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b5528a1751da6995d849877e0cb05922afbb6a42b53481c2223de81ae3c8dcc"
Nov 26 09:13:45 crc kubenswrapper[4940]: I1126 09:13:45.927433 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-jrhf2"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.044955 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-6krwc"]
Nov 26 09:13:46 crc kubenswrapper[4940]: E1126 09:13:46.045576 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="registry-server"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.045600 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="registry-server"
Nov 26 09:13:46 crc kubenswrapper[4940]: E1126 09:13:46.045618 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="extract-content"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.045627 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="extract-content"
Nov 26 09:13:46 crc kubenswrapper[4940]: E1126 09:13:46.045648 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="extract-utilities"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.045656 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="extract-utilities"
Nov 26 09:13:46 crc kubenswrapper[4940]: E1126 09:13:46.045670 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" containerName="bootstrap-openstack-openstack-cell1"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.045677 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" containerName="bootstrap-openstack-openstack-cell1"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.045950 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4171d2d5-ce10-4eee-a68f-22e61ef5e9f6" containerName="registry-server"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.045989 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4" containerName="bootstrap-openstack-openstack-cell1"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.047104 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.051597 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.051827 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.059231 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-6krwc"]
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.150589 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85jqg\" (UniqueName: \"kubernetes.io/projected/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-kube-api-access-85jqg\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.151245 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-inventory\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.151427 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ceph\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.151575 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ssh-key\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.253315 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ceph\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.253385 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ssh-key\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.253468 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85jqg\" (UniqueName: \"kubernetes.io/projected/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-kube-api-access-85jqg\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.253560 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-inventory\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.260375 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ssh-key\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.260383 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ceph\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.260754 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-inventory\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.275786 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85jqg\" (UniqueName: \"kubernetes.io/projected/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-kube-api-access-85jqg\") pod \"download-cache-openstack-openstack-cell1-6krwc\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.361778 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-networker-rn57g"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.374240 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-6krwc"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.581294 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-bootstrap-combined-ca-bundle\") pod \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") "
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.581648 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-ssh-key\") pod \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") "
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.581703 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nj6nx\" (UniqueName: \"kubernetes.io/projected/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-kube-api-access-nj6nx\") pod \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") "
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.581746 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-inventory\") pod \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\" (UID: \"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7\") "
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.587027 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-kube-api-access-nj6nx" (OuterVolumeSpecName: "kube-api-access-nj6nx") pod "8d78b5da-8b3a-43fa-a3c0-dbdce05784a7" (UID: "8d78b5da-8b3a-43fa-a3c0-dbdce05784a7"). InnerVolumeSpecName "kube-api-access-nj6nx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.601489 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "8d78b5da-8b3a-43fa-a3c0-dbdce05784a7" (UID: "8d78b5da-8b3a-43fa-a3c0-dbdce05784a7"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.614198 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-inventory" (OuterVolumeSpecName: "inventory") pod "8d78b5da-8b3a-43fa-a3c0-dbdce05784a7" (UID: "8d78b5da-8b3a-43fa-a3c0-dbdce05784a7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.622123 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8d78b5da-8b3a-43fa-a3c0-dbdce05784a7" (UID: "8d78b5da-8b3a-43fa-a3c0-dbdce05784a7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.684526 4940 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.684558 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.684568 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nj6nx\" (UniqueName: \"kubernetes.io/projected/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-kube-api-access-nj6nx\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.684577 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d78b5da-8b3a-43fa-a3c0-dbdce05784a7-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.948285 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-networker-rn57g" event={"ID":"8d78b5da-8b3a-43fa-a3c0-dbdce05784a7","Type":"ContainerDied","Data":"2ea27ffe5e21e3ae3225ac16e3a2d9a685d3e952a3f3a0e9f7763edaa9381078"}
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.948369 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ea27ffe5e21e3ae3225ac16e3a2d9a685d3e952a3f3a0e9f7763edaa9381078"
Nov 26 09:13:46 crc kubenswrapper[4940]: I1126 09:13:46.948479 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-networker-rn57g"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.026399 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-6krwc"]
Nov 26 09:13:47 crc kubenswrapper[4940]: W1126 09:13:47.035267 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95d0f4e1_aa9b_443e_b5e5_2668c3aa2214.slice/crio-8af029729452e35aa0982ee375e0d4668cfb0b801425a8692d1679416cc0512f WatchSource:0}: Error finding container 8af029729452e35aa0982ee375e0d4668cfb0b801425a8692d1679416cc0512f: Status 404 returned error can't find the container with id 8af029729452e35aa0982ee375e0d4668cfb0b801425a8692d1679416cc0512f
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.038342 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-networker-8kqfq"]
Nov 26 09:13:47 crc kubenswrapper[4940]: E1126 09:13:47.039770 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d78b5da-8b3a-43fa-a3c0-dbdce05784a7" containerName="bootstrap-openstack-openstack-networker"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.039801 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d78b5da-8b3a-43fa-a3c0-dbdce05784a7" containerName="bootstrap-openstack-openstack-networker"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.040161 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d78b5da-8b3a-43fa-a3c0-dbdce05784a7" containerName="bootstrap-openstack-openstack-networker"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.041302 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.047827 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.048472 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.064098 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-networker-8kqfq"]
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.195371 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-inventory\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.195444 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2xhj\" (UniqueName: \"kubernetes.io/projected/94b1f446-bfa7-4c5c-9e23-643d37c77a39-kube-api-access-b2xhj\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.195489 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-ssh-key\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.297947 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-inventory\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.298022 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2xhj\" (UniqueName: \"kubernetes.io/projected/94b1f446-bfa7-4c5c-9e23-643d37c77a39-kube-api-access-b2xhj\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.298085 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-ssh-key\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.303509 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-ssh-key\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.305554 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-inventory\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.331706 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2xhj\" (UniqueName: \"kubernetes.io/projected/94b1f446-bfa7-4c5c-9e23-643d37c77a39-kube-api-access-b2xhj\") pod \"download-cache-openstack-openstack-networker-8kqfq\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") " pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.389957 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.776178 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-networker-8kqfq"]
Nov 26 09:13:47 crc kubenswrapper[4940]: W1126 09:13:47.781178 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94b1f446_bfa7_4c5c_9e23_643d37c77a39.slice/crio-0740ae824264eac9ce2c2eb9139f98ec1794e4233215999a8dfa95e7c5e4e185 WatchSource:0}: Error finding container 0740ae824264eac9ce2c2eb9139f98ec1794e4233215999a8dfa95e7c5e4e185: Status 404 returned error can't find the container with id 0740ae824264eac9ce2c2eb9139f98ec1794e4233215999a8dfa95e7c5e4e185
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.959286 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-networker-8kqfq" event={"ID":"94b1f446-bfa7-4c5c-9e23-643d37c77a39","Type":"ContainerStarted","Data":"0740ae824264eac9ce2c2eb9139f98ec1794e4233215999a8dfa95e7c5e4e185"}
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.962229 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-6krwc" event={"ID":"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214","Type":"ContainerStarted","Data":"80910228e73b96860854a2c87a25a6284bf0795ce4bf7f3db9cac8b7f43b793e"}
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.962297 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-6krwc" event={"ID":"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214","Type":"ContainerStarted","Data":"8af029729452e35aa0982ee375e0d4668cfb0b801425a8692d1679416cc0512f"}
Nov 26 09:13:47 crc kubenswrapper[4940]: I1126 09:13:47.996208 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-6krwc" podStartSLOduration=1.477595939 podStartE2EDuration="1.996189465s" podCreationTimestamp="2025-11-26 09:13:46 +0000 UTC" firstStartedPulling="2025-11-26 09:13:47.037846469 +0000 UTC m=+8328.557988098" lastFinishedPulling="2025-11-26 09:13:47.556440005 +0000 UTC m=+8329.076581624" observedRunningTime="2025-11-26 09:13:47.983625517 +0000 UTC m=+8329.503767146" watchObservedRunningTime="2025-11-26 09:13:47.996189465 +0000 UTC m=+8329.516331084"
Nov 26 09:13:48 crc kubenswrapper[4940]: I1126 09:13:48.166056 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:13:48 crc kubenswrapper[4940]: E1126 09:13:48.166422 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:13:48 crc kubenswrapper[4940]: I1126 09:13:48.976418 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-networker-8kqfq" event={"ID":"94b1f446-bfa7-4c5c-9e23-643d37c77a39","Type":"ContainerStarted","Data":"6a700ea636b4671b01be1fc559d68a1169a10844256ab9494c2f0b1c1207ce6f"}
Nov 26 09:13:48 crc kubenswrapper[4940]: I1126 09:13:48.999104 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-networker-8kqfq" podStartSLOduration=2.493644181 podStartE2EDuration="2.999084389s" podCreationTimestamp="2025-11-26 09:13:46 +0000 UTC" firstStartedPulling="2025-11-26 09:13:47.784506826 +0000 UTC m=+8329.304648455" lastFinishedPulling="2025-11-26 09:13:48.289947044 +0000 UTC m=+8329.810088663" observedRunningTime="2025-11-26 09:13:48.995955489 +0000 UTC m=+8330.516097118" watchObservedRunningTime="2025-11-26 09:13:48.999084389 +0000 UTC m=+8330.519226028"
Nov 26 09:13:59 crc kubenswrapper[4940]: I1126 09:13:59.174113 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:13:59 crc kubenswrapper[4940]: E1126 09:13:59.175083 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:14:10 crc kubenswrapper[4940]: I1126 09:14:10.165544 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:14:10 crc kubenswrapper[4940]: E1126 09:14:10.166647 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:14:23 crc kubenswrapper[4940]: I1126 09:14:23.166296 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:14:23 crc kubenswrapper[4940]: E1126 09:14:23.166990 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:14:38 crc kubenswrapper[4940]: I1126 09:14:38.166599 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:14:38 crc kubenswrapper[4940]: E1126 09:14:38.169797 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.747142 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bzngn"]
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.752608 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.756920 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzngn"]
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.825844 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-catalog-content\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.826193 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-utilities\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.826265 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtfgk\" (UniqueName: \"kubernetes.io/projected/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-kube-api-access-qtfgk\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.929687 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-utilities\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.930153 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-utilities\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.930262 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtfgk\" (UniqueName: \"kubernetes.io/projected/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-kube-api-access-qtfgk\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.930354 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-catalog-content\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.930665 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-catalog-content\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:46 crc kubenswrapper[4940]: I1126 09:14:46.952860 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtfgk\" (UniqueName: \"kubernetes.io/projected/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-kube-api-access-qtfgk\") pod \"redhat-marketplace-bzngn\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:47 crc kubenswrapper[4940]: I1126 09:14:47.082853 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:47 crc kubenswrapper[4940]: I1126 09:14:47.617912 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzngn"]
Nov 26 09:14:47 crc kubenswrapper[4940]: I1126 09:14:47.647382 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzngn" event={"ID":"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a","Type":"ContainerStarted","Data":"1f66b31992681d28041200bc90279825f775e36644525dae128c970567133903"}
Nov 26 09:14:48 crc kubenswrapper[4940]: I1126 09:14:48.659768 4940 generic.go:334] "Generic (PLEG): container finished" podID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerID="75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7" exitCode=0
Nov 26 09:14:48 crc kubenswrapper[4940]: I1126 09:14:48.659880 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzngn" event={"ID":"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a","Type":"ContainerDied","Data":"75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7"}
Nov 26 09:14:50 crc kubenswrapper[4940]: I1126 09:14:50.165842 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902"
Nov 26 09:14:50 crc kubenswrapper[4940]: E1126 09:14:50.166381 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:14:50 crc kubenswrapper[4940]: I1126 09:14:50.686304 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzngn" event={"ID":"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a","Type":"ContainerStarted","Data":"08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539"}
Nov 26 09:14:51 crc kubenswrapper[4940]: I1126 09:14:51.705463 4940 generic.go:334] "Generic (PLEG): container finished" podID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerID="08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539" exitCode=0
Nov 26 09:14:51 crc kubenswrapper[4940]: I1126 09:14:51.705550 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzngn" event={"ID":"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a","Type":"ContainerDied","Data":"08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539"}
Nov 26 09:14:53 crc kubenswrapper[4940]: I1126 09:14:53.729854 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzngn" event={"ID":"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a","Type":"ContainerStarted","Data":"20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52"}
Nov 26 09:14:53 crc kubenswrapper[4940]: I1126 09:14:53.759453 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bzngn" podStartSLOduration=3.793241918 podStartE2EDuration="7.759427518s" podCreationTimestamp="2025-11-26 09:14:46 +0000 UTC" firstStartedPulling="2025-11-26 09:14:48.662118949 +0000 UTC m=+8390.182260558" lastFinishedPulling="2025-11-26 09:14:52.628304539 +0000 UTC m=+8394.148446158" observedRunningTime="2025-11-26 09:14:53.749840073 +0000 UTC m=+8395.269981692" watchObservedRunningTime="2025-11-26 09:14:53.759427518 +0000 UTC m=+8395.279569167"
Nov 26 09:14:57 crc kubenswrapper[4940]: I1126 09:14:57.083512 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:57 crc kubenswrapper[4940]: I1126 09:14:57.083816 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:57 crc kubenswrapper[4940]: I1126 09:14:57.131951 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:57 crc kubenswrapper[4940]: I1126 09:14:57.801389 4940 generic.go:334] "Generic (PLEG): container finished" podID="94b1f446-bfa7-4c5c-9e23-643d37c77a39" containerID="6a700ea636b4671b01be1fc559d68a1169a10844256ab9494c2f0b1c1207ce6f" exitCode=0
Nov 26 09:14:57 crc kubenswrapper[4940]: I1126 09:14:57.801529 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-networker-8kqfq" event={"ID":"94b1f446-bfa7-4c5c-9e23-643d37c77a39","Type":"ContainerDied","Data":"6a700ea636b4671b01be1fc559d68a1169a10844256ab9494c2f0b1c1207ce6f"}
Nov 26 09:14:57 crc kubenswrapper[4940]: I1126 09:14:57.864009 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:14:57 crc kubenswrapper[4940]: I1126 09:14:57.921873 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzngn"]
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.321128 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.428309 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-ssh-key\") pod \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") "
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.428418 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-inventory\") pod \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") "
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.428440 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2xhj\" (UniqueName: \"kubernetes.io/projected/94b1f446-bfa7-4c5c-9e23-643d37c77a39-kube-api-access-b2xhj\") pod \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\" (UID: \"94b1f446-bfa7-4c5c-9e23-643d37c77a39\") "
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.439330 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94b1f446-bfa7-4c5c-9e23-643d37c77a39-kube-api-access-b2xhj" (OuterVolumeSpecName: "kube-api-access-b2xhj") pod "94b1f446-bfa7-4c5c-9e23-643d37c77a39" (UID: "94b1f446-bfa7-4c5c-9e23-643d37c77a39"). InnerVolumeSpecName "kube-api-access-b2xhj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.480964 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "94b1f446-bfa7-4c5c-9e23-643d37c77a39" (UID: "94b1f446-bfa7-4c5c-9e23-643d37c77a39"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.488599 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-inventory" (OuterVolumeSpecName: "inventory") pod "94b1f446-bfa7-4c5c-9e23-643d37c77a39" (UID: "94b1f446-bfa7-4c5c-9e23-643d37c77a39"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.531282 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.531352 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/94b1f446-bfa7-4c5c-9e23-643d37c77a39-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.531373 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2xhj\" (UniqueName: \"kubernetes.io/projected/94b1f446-bfa7-4c5c-9e23-643d37c77a39-kube-api-access-b2xhj\") on node \"crc\" DevicePath \"\""
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.831274 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bzngn" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerName="registry-server" containerID="cri-o://20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52" gracePeriod=2
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.831725 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-networker-8kqfq"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.833246 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-networker-8kqfq" event={"ID":"94b1f446-bfa7-4c5c-9e23-643d37c77a39","Type":"ContainerDied","Data":"0740ae824264eac9ce2c2eb9139f98ec1794e4233215999a8dfa95e7c5e4e185"}
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.833294 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0740ae824264eac9ce2c2eb9139f98ec1794e4233215999a8dfa95e7c5e4e185"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.946791 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-networker-6jkk6"]
Nov 26 09:14:59 crc kubenswrapper[4940]: E1126 09:14:59.947295 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94b1f446-bfa7-4c5c-9e23-643d37c77a39" containerName="download-cache-openstack-openstack-networker"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.947319 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="94b1f446-bfa7-4c5c-9e23-643d37c77a39" containerName="download-cache-openstack-openstack-networker"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.947633 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="94b1f446-bfa7-4c5c-9e23-643d37c77a39" containerName="download-cache-openstack-openstack-networker"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.948664 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.952179 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.953676 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker"
Nov 26 09:14:59 crc kubenswrapper[4940]: I1126 09:14:59.961303 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-networker-6jkk6"]
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.042538 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-inventory\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.042612 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-ssh-key\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.042822 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l75qg\" (UniqueName: \"kubernetes.io/projected/1ad45eff-0dd2-4c43-b72a-7d852ae00822-kube-api-access-l75qg\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.144801 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-inventory\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.144880 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-ssh-key\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.144952 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l75qg\" (UniqueName: \"kubernetes.io/projected/1ad45eff-0dd2-4c43-b72a-7d852ae00822-kube-api-access-l75qg\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.149216 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"]
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.150398 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-ssh-key\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.150808 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.153681 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.153921 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.155187 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-inventory\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.160701 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"]
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.167790 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l75qg\" (UniqueName: \"kubernetes.io/projected/1ad45eff-0dd2-4c43-b72a-7d852ae00822-kube-api-access-l75qg\") pod \"configure-network-openstack-openstack-networker-6jkk6\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.325823 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-networker-6jkk6"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.348118 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzq48\" (UniqueName: \"kubernetes.io/projected/8939c50e-fda8-4489-8901-9cd1484b0123-kube-api-access-lzq48\") pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.348990 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8939c50e-fda8-4489-8901-9cd1484b0123-config-volume\") pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.349534 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8939c50e-fda8-4489-8901-9cd1484b0123-secret-volume\") pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.434065 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzngn"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.451226 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8939c50e-fda8-4489-8901-9cd1484b0123-config-volume\") pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.451349 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8939c50e-fda8-4489-8901-9cd1484b0123-secret-volume\") pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.451402 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzq48\" (UniqueName: \"kubernetes.io/projected/8939c50e-fda8-4489-8901-9cd1484b0123-kube-api-access-lzq48\") pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.453979 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8939c50e-fda8-4489-8901-9cd1484b0123-config-volume\") pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"
Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.471703 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8939c50e-fda8-4489-8901-9cd1484b0123-secret-volume\")
pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.471738 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzq48\" (UniqueName: \"kubernetes.io/projected/8939c50e-fda8-4489-8901-9cd1484b0123-kube-api-access-lzq48\") pod \"collect-profiles-29402475-hzbwd\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.476648 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.554864 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-utilities\") pod \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.556071 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-utilities" (OuterVolumeSpecName: "utilities") pod "eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" (UID: "eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.557131 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtfgk\" (UniqueName: \"kubernetes.io/projected/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-kube-api-access-qtfgk\") pod \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.557622 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-catalog-content\") pod \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\" (UID: \"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a\") " Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.558219 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.561315 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-kube-api-access-qtfgk" (OuterVolumeSpecName: "kube-api-access-qtfgk") pod "eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" (UID: "eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a"). InnerVolumeSpecName "kube-api-access-qtfgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.577745 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" (UID: "eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.660698 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtfgk\" (UniqueName: \"kubernetes.io/projected/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-kube-api-access-qtfgk\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.660995 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.840384 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-networker-6jkk6"] Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.844286 4940 generic.go:334] "Generic (PLEG): container finished" podID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerID="20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52" exitCode=0 Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.844346 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzngn" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.844340 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzngn" event={"ID":"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a","Type":"ContainerDied","Data":"20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52"} Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.844436 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzngn" event={"ID":"eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a","Type":"ContainerDied","Data":"1f66b31992681d28041200bc90279825f775e36644525dae128c970567133903"} Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.844455 4940 scope.go:117] "RemoveContainer" containerID="20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.875515 4940 scope.go:117] "RemoveContainer" containerID="08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.882217 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzngn"] Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.893116 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzngn"] Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.894466 4940 scope.go:117] "RemoveContainer" containerID="75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.919990 4940 scope.go:117] "RemoveContainer" containerID="20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52" Nov 26 09:15:00 crc kubenswrapper[4940]: E1126 09:15:00.920489 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52\": container with ID starting with 20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52 not found: ID does not exist" containerID="20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.920553 4940 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52"} err="failed to get container status \"20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52\": rpc error: code = NotFound desc = could not find container \"20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52\": container with ID starting with 20a2bda1002d089da9b868e3b913eb2b0f1ac2b288d57456aaca0b6b63564f52 not found: ID does not exist" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.920583 4940 scope.go:117] "RemoveContainer" containerID="08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539" Nov 26 09:15:00 crc kubenswrapper[4940]: E1126 09:15:00.921838 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539\": container with ID starting with 08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539 not found: ID does not exist" containerID="08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.921866 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539"} err="failed to get container status \"08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539\": rpc error: code = NotFound desc = could not find container \"08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539\": container with ID starting with 08f2fa4dd52de7b75f2e094f2cad588bc0c7e9d89320b6c3d493b7ce86807539 not found: ID does not exist" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.921884 4940 scope.go:117] "RemoveContainer" containerID="75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7" Nov 26 09:15:00 crc kubenswrapper[4940]: E1126 09:15:00.922293 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7\": container with ID starting with 75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7 not found: ID does not exist" containerID="75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.922343 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7"} err="failed to get container status \"75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7\": rpc error: code = NotFound desc = could not find container \"75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7\": container with ID starting with 75ea23d395d467e76ad04a64c8377bedbfcd387b5347fcd60280a79c0f9bb9a7 not found: ID does not exist" Nov 26 09:15:00 crc kubenswrapper[4940]: I1126 09:15:00.973561 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"] Nov 26 09:15:00 crc kubenswrapper[4940]: W1126 09:15:00.981653 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8939c50e_fda8_4489_8901_9cd1484b0123.slice/crio-1181474a9c1e157929e059d102570f803ffd2122c80644fa14f6e1c5ed3b7d0c WatchSource:0}: Error finding container 1181474a9c1e157929e059d102570f803ffd2122c80644fa14f6e1c5ed3b7d0c: Status 
404 returned error can't find the container with id 1181474a9c1e157929e059d102570f803ffd2122c80644fa14f6e1c5ed3b7d0c Nov 26 09:15:01 crc kubenswrapper[4940]: I1126 09:15:01.177243 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" path="/var/lib/kubelet/pods/eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a/volumes" Nov 26 09:15:01 crc kubenswrapper[4940]: I1126 09:15:01.855984 4940 generic.go:334] "Generic (PLEG): container finished" podID="8939c50e-fda8-4489-8901-9cd1484b0123" containerID="82e76cbc1d056bf0cff87bb9993e50109360370b824f052b649f7f2b01d2eb5b" exitCode=0 Nov 26 09:15:01 crc kubenswrapper[4940]: I1126 09:15:01.856074 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd" event={"ID":"8939c50e-fda8-4489-8901-9cd1484b0123","Type":"ContainerDied","Data":"82e76cbc1d056bf0cff87bb9993e50109360370b824f052b649f7f2b01d2eb5b"} Nov 26 09:15:01 crc kubenswrapper[4940]: I1126 09:15:01.856300 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd" event={"ID":"8939c50e-fda8-4489-8901-9cd1484b0123","Type":"ContainerStarted","Data":"1181474a9c1e157929e059d102570f803ffd2122c80644fa14f6e1c5ed3b7d0c"} Nov 26 09:15:01 crc kubenswrapper[4940]: I1126 09:15:01.858332 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-networker-6jkk6" event={"ID":"1ad45eff-0dd2-4c43-b72a-7d852ae00822","Type":"ContainerStarted","Data":"356917b78112e179f2b9756ccc8f4244f6557045ea65bfdb627f3095ce4e5404"} Nov 26 09:15:01 crc kubenswrapper[4940]: I1126 09:15:01.858432 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-networker-6jkk6" event={"ID":"1ad45eff-0dd2-4c43-b72a-7d852ae00822","Type":"ContainerStarted","Data":"5671bcdb80abacee8d68699c0edd46b90b157e3539c1551fecede67481f2c53a"} Nov 26 09:15:01 crc kubenswrapper[4940]: I1126 09:15:01.900241 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-networker-6jkk6" podStartSLOduration=2.387004456 podStartE2EDuration="2.900221382s" podCreationTimestamp="2025-11-26 09:14:59 +0000 UTC" firstStartedPulling="2025-11-26 09:15:00.844270622 +0000 UTC m=+8402.364412241" lastFinishedPulling="2025-11-26 09:15:01.357487548 +0000 UTC m=+8402.877629167" observedRunningTime="2025-11-26 09:15:01.893685074 +0000 UTC m=+8403.413826693" watchObservedRunningTime="2025-11-26 09:15:01.900221382 +0000 UTC m=+8403.420363001" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.244746 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.415995 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8939c50e-fda8-4489-8901-9cd1484b0123-config-volume\") pod \"8939c50e-fda8-4489-8901-9cd1484b0123\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.416208 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8939c50e-fda8-4489-8901-9cd1484b0123-secret-volume\") pod \"8939c50e-fda8-4489-8901-9cd1484b0123\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.416288 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzq48\" (UniqueName: \"kubernetes.io/projected/8939c50e-fda8-4489-8901-9cd1484b0123-kube-api-access-lzq48\") pod \"8939c50e-fda8-4489-8901-9cd1484b0123\" (UID: \"8939c50e-fda8-4489-8901-9cd1484b0123\") " Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.417918 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8939c50e-fda8-4489-8901-9cd1484b0123-config-volume" (OuterVolumeSpecName: "config-volume") pod "8939c50e-fda8-4489-8901-9cd1484b0123" (UID: "8939c50e-fda8-4489-8901-9cd1484b0123"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.423413 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8939c50e-fda8-4489-8901-9cd1484b0123-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8939c50e-fda8-4489-8901-9cd1484b0123" (UID: "8939c50e-fda8-4489-8901-9cd1484b0123"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.426361 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8939c50e-fda8-4489-8901-9cd1484b0123-kube-api-access-lzq48" (OuterVolumeSpecName: "kube-api-access-lzq48") pod "8939c50e-fda8-4489-8901-9cd1484b0123" (UID: "8939c50e-fda8-4489-8901-9cd1484b0123"). InnerVolumeSpecName "kube-api-access-lzq48". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.518240 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8939c50e-fda8-4489-8901-9cd1484b0123-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.518277 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzq48\" (UniqueName: \"kubernetes.io/projected/8939c50e-fda8-4489-8901-9cd1484b0123-kube-api-access-lzq48\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.518287 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8939c50e-fda8-4489-8901-9cd1484b0123-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.876471 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd" event={"ID":"8939c50e-fda8-4489-8901-9cd1484b0123","Type":"ContainerDied","Data":"1181474a9c1e157929e059d102570f803ffd2122c80644fa14f6e1c5ed3b7d0c"} Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.876522 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1181474a9c1e157929e059d102570f803ffd2122c80644fa14f6e1c5ed3b7d0c" Nov 26 09:15:03 crc kubenswrapper[4940]: I1126 09:15:03.876698 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd" Nov 26 09:15:04 crc kubenswrapper[4940]: I1126 09:15:04.165755 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" Nov 26 09:15:04 crc kubenswrapper[4940]: I1126 09:15:04.321775 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"] Nov 26 09:15:04 crc kubenswrapper[4940]: I1126 09:15:04.333837 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402430-nqtqn"] Nov 26 09:15:04 crc kubenswrapper[4940]: I1126 09:15:04.891320 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"9e9aff618bcec373fe2c25642a6939cbb5873fb50b14862a18a1ace3c3c4e20a"} Nov 26 09:15:05 crc kubenswrapper[4940]: I1126 09:15:05.178799 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5" path="/var/lib/kubelet/pods/5a8e4af9-e7ec-4358-8db3-fd78c2adcfa5/volumes" Nov 26 09:15:16 crc kubenswrapper[4940]: I1126 09:15:16.396555 4940 scope.go:117] "RemoveContainer" containerID="23349b5545f475e792bb33216eb291bceec31a8447eb0809988dcb19cf903e93" Nov 26 09:15:19 crc kubenswrapper[4940]: I1126 09:15:19.088729 4940 generic.go:334] "Generic (PLEG): container finished" podID="95d0f4e1-aa9b-443e-b5e5-2668c3aa2214" containerID="80910228e73b96860854a2c87a25a6284bf0795ce4bf7f3db9cac8b7f43b793e" exitCode=0 Nov 26 09:15:19 crc kubenswrapper[4940]: I1126 09:15:19.088812 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-6krwc" 
event={"ID":"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214","Type":"ContainerDied","Data":"80910228e73b96860854a2c87a25a6284bf0795ce4bf7f3db9cac8b7f43b793e"} Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.605240 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-6krwc" Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.793548 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ssh-key\") pod \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.793651 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85jqg\" (UniqueName: \"kubernetes.io/projected/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-kube-api-access-85jqg\") pod \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.793806 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ceph\") pod \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.793879 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-inventory\") pod \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\" (UID: \"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214\") " Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.801165 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-kube-api-access-85jqg" (OuterVolumeSpecName: "kube-api-access-85jqg") pod "95d0f4e1-aa9b-443e-b5e5-2668c3aa2214" (UID: "95d0f4e1-aa9b-443e-b5e5-2668c3aa2214"). InnerVolumeSpecName "kube-api-access-85jqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.823534 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ceph" (OuterVolumeSpecName: "ceph") pod "95d0f4e1-aa9b-443e-b5e5-2668c3aa2214" (UID: "95d0f4e1-aa9b-443e-b5e5-2668c3aa2214"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.845447 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "95d0f4e1-aa9b-443e-b5e5-2668c3aa2214" (UID: "95d0f4e1-aa9b-443e-b5e5-2668c3aa2214"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.871146 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-inventory" (OuterVolumeSpecName: "inventory") pod "95d0f4e1-aa9b-443e-b5e5-2668c3aa2214" (UID: "95d0f4e1-aa9b-443e-b5e5-2668c3aa2214"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.897504 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.897557 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.897577 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:20 crc kubenswrapper[4940]: I1126 09:15:20.897594 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85jqg\" (UniqueName: \"kubernetes.io/projected/95d0f4e1-aa9b-443e-b5e5-2668c3aa2214-kube-api-access-85jqg\") on node \"crc\" DevicePath \"\"" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.107524 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-6krwc" event={"ID":"95d0f4e1-aa9b-443e-b5e5-2668c3aa2214","Type":"ContainerDied","Data":"8af029729452e35aa0982ee375e0d4668cfb0b801425a8692d1679416cc0512f"} Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.107574 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8af029729452e35aa0982ee375e0d4668cfb0b801425a8692d1679416cc0512f" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.107623 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-6krwc" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.200725 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-xv47l"] Nov 26 09:15:21 crc kubenswrapper[4940]: E1126 09:15:21.201238 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerName="extract-utilities" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.201257 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerName="extract-utilities" Nov 26 09:15:21 crc kubenswrapper[4940]: E1126 09:15:21.201279 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerName="extract-content" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.201285 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerName="extract-content" Nov 26 09:15:21 crc kubenswrapper[4940]: E1126 09:15:21.201308 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerName="registry-server" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.201314 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerName="registry-server" Nov 26 09:15:21 crc kubenswrapper[4940]: E1126 09:15:21.201331 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95d0f4e1-aa9b-443e-b5e5-2668c3aa2214" containerName="download-cache-openstack-openstack-cell1" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.201337 4940 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="95d0f4e1-aa9b-443e-b5e5-2668c3aa2214" containerName="download-cache-openstack-openstack-cell1" Nov 26 09:15:21 crc kubenswrapper[4940]: E1126 09:15:21.201354 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8939c50e-fda8-4489-8901-9cd1484b0123" containerName="collect-profiles" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.201360 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8939c50e-fda8-4489-8901-9cd1484b0123" containerName="collect-profiles" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.201646 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb29c062-6e3e-4c9f-a8d1-2ecfbfc6559a" containerName="registry-server" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.201658 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8939c50e-fda8-4489-8901-9cd1484b0123" containerName="collect-profiles" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.201675 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="95d0f4e1-aa9b-443e-b5e5-2668c3aa2214" containerName="download-cache-openstack-openstack-cell1" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.202527 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.204676 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.205172 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.227336 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-xv47l"] Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.304183 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-inventory\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.304444 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ssh-key\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.304560 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ceph\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.304686 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjz9f\" (UniqueName: \"kubernetes.io/projected/b6222fff-0241-4d22-b985-de311d9dcd17-kube-api-access-pjz9f\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: 
\"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.407094 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ceph\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.407539 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjz9f\" (UniqueName: \"kubernetes.io/projected/b6222fff-0241-4d22-b985-de311d9dcd17-kube-api-access-pjz9f\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.407721 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-inventory\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.407799 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ssh-key\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.413002 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ssh-key\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.413129 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ceph\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.413567 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-inventory\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.426445 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjz9f\" (UniqueName: \"kubernetes.io/projected/b6222fff-0241-4d22-b985-de311d9dcd17-kube-api-access-pjz9f\") pod \"configure-network-openstack-openstack-cell1-xv47l\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:21 crc kubenswrapper[4940]: I1126 09:15:21.521137 4940 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:15:22 crc kubenswrapper[4940]: I1126 09:15:22.067956 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-xv47l"] Nov 26 09:15:22 crc kubenswrapper[4940]: I1126 09:15:22.075133 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:15:22 crc kubenswrapper[4940]: I1126 09:15:22.119312 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" event={"ID":"b6222fff-0241-4d22-b985-de311d9dcd17","Type":"ContainerStarted","Data":"6911cd9d76e79a0f79ce7cf1c7e86ef28ff79062f3123bb00813f13d89c21040"} Nov 26 09:15:24 crc kubenswrapper[4940]: I1126 09:15:24.143217 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" event={"ID":"b6222fff-0241-4d22-b985-de311d9dcd17","Type":"ContainerStarted","Data":"46e62df5fc02dd019032d2445198cce571b9792007cc2c4a03820140d8539235"} Nov 26 09:15:24 crc kubenswrapper[4940]: I1126 09:15:24.170192 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" podStartSLOduration=2.318481568 podStartE2EDuration="3.170167534s" podCreationTimestamp="2025-11-26 09:15:21 +0000 UTC" firstStartedPulling="2025-11-26 09:15:22.074904193 +0000 UTC m=+8423.595045812" lastFinishedPulling="2025-11-26 09:15:22.926590159 +0000 UTC m=+8424.446731778" observedRunningTime="2025-11-26 09:15:24.163335386 +0000 UTC m=+8425.683477015" watchObservedRunningTime="2025-11-26 09:15:24.170167534 +0000 UTC m=+8425.690309173" Nov 26 09:16:06 crc kubenswrapper[4940]: I1126 09:16:06.617804 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad45eff-0dd2-4c43-b72a-7d852ae00822" containerID="356917b78112e179f2b9756ccc8f4244f6557045ea65bfdb627f3095ce4e5404" exitCode=0 Nov 26 09:16:06 crc kubenswrapper[4940]: I1126 09:16:06.617892 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-networker-6jkk6" event={"ID":"1ad45eff-0dd2-4c43-b72a-7d852ae00822","Type":"ContainerDied","Data":"356917b78112e179f2b9756ccc8f4244f6557045ea65bfdb627f3095ce4e5404"} Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.124431 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-networker-6jkk6" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.278805 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l75qg\" (UniqueName: \"kubernetes.io/projected/1ad45eff-0dd2-4c43-b72a-7d852ae00822-kube-api-access-l75qg\") pod \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.278949 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-inventory\") pod \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.279210 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-ssh-key\") pod \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\" (UID: \"1ad45eff-0dd2-4c43-b72a-7d852ae00822\") " Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.286901 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ad45eff-0dd2-4c43-b72a-7d852ae00822-kube-api-access-l75qg" (OuterVolumeSpecName: "kube-api-access-l75qg") pod "1ad45eff-0dd2-4c43-b72a-7d852ae00822" (UID: "1ad45eff-0dd2-4c43-b72a-7d852ae00822"). InnerVolumeSpecName "kube-api-access-l75qg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.305190 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1ad45eff-0dd2-4c43-b72a-7d852ae00822" (UID: "1ad45eff-0dd2-4c43-b72a-7d852ae00822"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.311914 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-inventory" (OuterVolumeSpecName: "inventory") pod "1ad45eff-0dd2-4c43-b72a-7d852ae00822" (UID: "1ad45eff-0dd2-4c43-b72a-7d852ae00822"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.387184 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.387225 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l75qg\" (UniqueName: \"kubernetes.io/projected/1ad45eff-0dd2-4c43-b72a-7d852ae00822-kube-api-access-l75qg\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.387244 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ad45eff-0dd2-4c43-b72a-7d852ae00822-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.641407 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-networker-6jkk6" event={"ID":"1ad45eff-0dd2-4c43-b72a-7d852ae00822","Type":"ContainerDied","Data":"5671bcdb80abacee8d68699c0edd46b90b157e3539c1551fecede67481f2c53a"} Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.641463 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5671bcdb80abacee8d68699c0edd46b90b157e3539c1551fecede67481f2c53a" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.641465 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-networker-6jkk6" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.803554 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-networker-gbqp4"] Nov 26 09:16:08 crc kubenswrapper[4940]: E1126 09:16:08.804292 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ad45eff-0dd2-4c43-b72a-7d852ae00822" containerName="configure-network-openstack-openstack-networker" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.804323 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ad45eff-0dd2-4c43-b72a-7d852ae00822" containerName="configure-network-openstack-openstack-networker" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.804677 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ad45eff-0dd2-4c43-b72a-7d852ae00822" containerName="configure-network-openstack-openstack-networker" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.805756 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.807688 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.808525 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.814164 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-networker-gbqp4"] Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.898826 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddh7x\" (UniqueName: \"kubernetes.io/projected/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-kube-api-access-ddh7x\") pod \"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.899285 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-inventory\") pod \"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:08 crc kubenswrapper[4940]: I1126 09:16:08.899349 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-ssh-key\") pod \"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:09 crc kubenswrapper[4940]: I1126 09:16:09.001010 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddh7x\" (UniqueName: \"kubernetes.io/projected/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-kube-api-access-ddh7x\") pod \"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:09 crc kubenswrapper[4940]: I1126 09:16:09.001177 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-inventory\") pod \"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:09 crc kubenswrapper[4940]: I1126 09:16:09.001225 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-ssh-key\") pod \"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:09 crc kubenswrapper[4940]: I1126 09:16:09.007552 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-inventory\") pod 
\"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:09 crc kubenswrapper[4940]: I1126 09:16:09.008462 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-ssh-key\") pod \"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:09 crc kubenswrapper[4940]: I1126 09:16:09.029566 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddh7x\" (UniqueName: \"kubernetes.io/projected/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-kube-api-access-ddh7x\") pod \"validate-network-openstack-openstack-networker-gbqp4\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:09 crc kubenswrapper[4940]: I1126 09:16:09.179138 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:09 crc kubenswrapper[4940]: I1126 09:16:09.806352 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-networker-gbqp4"] Nov 26 09:16:10 crc kubenswrapper[4940]: I1126 09:16:10.666990 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" event={"ID":"89f56311-2eb5-4dc4-8fb6-137d98ae1c48","Type":"ContainerStarted","Data":"2a8554c55fa8e35b308390c029a5fcb2098737eb25c1d22a00cf4be896e884b7"} Nov 26 09:16:10 crc kubenswrapper[4940]: I1126 09:16:10.667341 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" event={"ID":"89f56311-2eb5-4dc4-8fb6-137d98ae1c48","Type":"ContainerStarted","Data":"d8806e5996f27a8d93a026b2bff07cd90fab9b287f26826a263df250d4467e52"} Nov 26 09:16:10 crc kubenswrapper[4940]: I1126 09:16:10.701223 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" podStartSLOduration=2.222546851 podStartE2EDuration="2.701197728s" podCreationTimestamp="2025-11-26 09:16:08 +0000 UTC" firstStartedPulling="2025-11-26 09:16:09.801659762 +0000 UTC m=+8471.321801381" lastFinishedPulling="2025-11-26 09:16:10.280310629 +0000 UTC m=+8471.800452258" observedRunningTime="2025-11-26 09:16:10.684766206 +0000 UTC m=+8472.204907825" watchObservedRunningTime="2025-11-26 09:16:10.701197728 +0000 UTC m=+8472.221339357" Nov 26 09:16:15 crc kubenswrapper[4940]: I1126 09:16:15.726922 4940 generic.go:334] "Generic (PLEG): container finished" podID="89f56311-2eb5-4dc4-8fb6-137d98ae1c48" containerID="2a8554c55fa8e35b308390c029a5fcb2098737eb25c1d22a00cf4be896e884b7" exitCode=0 Nov 26 09:16:15 crc kubenswrapper[4940]: I1126 09:16:15.727178 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" event={"ID":"89f56311-2eb5-4dc4-8fb6-137d98ae1c48","Type":"ContainerDied","Data":"2a8554c55fa8e35b308390c029a5fcb2098737eb25c1d22a00cf4be896e884b7"} Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.306679 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.388387 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddh7x\" (UniqueName: \"kubernetes.io/projected/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-kube-api-access-ddh7x\") pod \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.388671 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-inventory\") pod \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.388767 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-ssh-key\") pod \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\" (UID: \"89f56311-2eb5-4dc4-8fb6-137d98ae1c48\") " Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.394551 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-kube-api-access-ddh7x" (OuterVolumeSpecName: "kube-api-access-ddh7x") pod "89f56311-2eb5-4dc4-8fb6-137d98ae1c48" (UID: "89f56311-2eb5-4dc4-8fb6-137d98ae1c48"). InnerVolumeSpecName "kube-api-access-ddh7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.417650 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "89f56311-2eb5-4dc4-8fb6-137d98ae1c48" (UID: "89f56311-2eb5-4dc4-8fb6-137d98ae1c48"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.427946 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-inventory" (OuterVolumeSpecName: "inventory") pod "89f56311-2eb5-4dc4-8fb6-137d98ae1c48" (UID: "89f56311-2eb5-4dc4-8fb6-137d98ae1c48"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.491644 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.491703 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.491717 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddh7x\" (UniqueName: \"kubernetes.io/projected/89f56311-2eb5-4dc4-8fb6-137d98ae1c48-kube-api-access-ddh7x\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.751370 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" event={"ID":"89f56311-2eb5-4dc4-8fb6-137d98ae1c48","Type":"ContainerDied","Data":"d8806e5996f27a8d93a026b2bff07cd90fab9b287f26826a263df250d4467e52"} Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.751431 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8806e5996f27a8d93a026b2bff07cd90fab9b287f26826a263df250d4467e52" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.751464 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-networker-gbqp4" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.944956 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-networker-r7r5p"] Nov 26 09:16:17 crc kubenswrapper[4940]: E1126 09:16:17.945596 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f56311-2eb5-4dc4-8fb6-137d98ae1c48" containerName="validate-network-openstack-openstack-networker" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.945623 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f56311-2eb5-4dc4-8fb6-137d98ae1c48" containerName="validate-network-openstack-openstack-networker" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.946021 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="89f56311-2eb5-4dc4-8fb6-137d98ae1c48" containerName="validate-network-openstack-openstack-networker" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.947309 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.949696 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.949922 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:16:17 crc kubenswrapper[4940]: I1126 09:16:17.968871 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-networker-r7r5p"] Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.001880 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b44pb\" (UniqueName: \"kubernetes.io/projected/84b4be12-9142-400e-b21b-2cdd5263b101-kube-api-access-b44pb\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.002182 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-ssh-key\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.002357 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-inventory\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.104346 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-ssh-key\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.104429 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-inventory\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.104580 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b44pb\" (UniqueName: \"kubernetes.io/projected/84b4be12-9142-400e-b21b-2cdd5263b101-kube-api-access-b44pb\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.110899 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-ssh-key\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " 
pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.111078 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-inventory\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.138790 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b44pb\" (UniqueName: \"kubernetes.io/projected/84b4be12-9142-400e-b21b-2cdd5263b101-kube-api-access-b44pb\") pod \"install-os-openstack-openstack-networker-r7r5p\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.272534 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.643215 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-networker-r7r5p"] Nov 26 09:16:18 crc kubenswrapper[4940]: I1126 09:16:18.761467 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-networker-r7r5p" event={"ID":"84b4be12-9142-400e-b21b-2cdd5263b101","Type":"ContainerStarted","Data":"42259d3cc6870da5ba5a257e5dbd852145d551a95d2366a67fa190befe53b17b"} Nov 26 09:16:20 crc kubenswrapper[4940]: I1126 09:16:20.784909 4940 generic.go:334] "Generic (PLEG): container finished" podID="b6222fff-0241-4d22-b985-de311d9dcd17" containerID="46e62df5fc02dd019032d2445198cce571b9792007cc2c4a03820140d8539235" exitCode=0 Nov 26 09:16:20 crc kubenswrapper[4940]: I1126 09:16:20.785571 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" event={"ID":"b6222fff-0241-4d22-b985-de311d9dcd17","Type":"ContainerDied","Data":"46e62df5fc02dd019032d2445198cce571b9792007cc2c4a03820140d8539235"} Nov 26 09:16:20 crc kubenswrapper[4940]: I1126 09:16:20.799060 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-networker-r7r5p" event={"ID":"84b4be12-9142-400e-b21b-2cdd5263b101","Type":"ContainerStarted","Data":"3f3fff51e3e93523ce3e116afa198dbcfb233dfa972ea0cf6c1c8f3fec69db00"} Nov 26 09:16:20 crc kubenswrapper[4940]: I1126 09:16:20.838293 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-networker-r7r5p" podStartSLOduration=2.8940423859999997 podStartE2EDuration="3.838269724s" podCreationTimestamp="2025-11-26 09:16:17 +0000 UTC" firstStartedPulling="2025-11-26 09:16:18.654663845 +0000 UTC m=+8480.174805464" lastFinishedPulling="2025-11-26 09:16:19.598891173 +0000 UTC m=+8481.119032802" observedRunningTime="2025-11-26 09:16:20.832644785 +0000 UTC m=+8482.352786464" watchObservedRunningTime="2025-11-26 09:16:20.838269724 +0000 UTC m=+8482.358411353" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.325831 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.430124 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjz9f\" (UniqueName: \"kubernetes.io/projected/b6222fff-0241-4d22-b985-de311d9dcd17-kube-api-access-pjz9f\") pod \"b6222fff-0241-4d22-b985-de311d9dcd17\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.430413 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ceph\") pod \"b6222fff-0241-4d22-b985-de311d9dcd17\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.430502 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-inventory\") pod \"b6222fff-0241-4d22-b985-de311d9dcd17\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.430567 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ssh-key\") pod \"b6222fff-0241-4d22-b985-de311d9dcd17\" (UID: \"b6222fff-0241-4d22-b985-de311d9dcd17\") " Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.444930 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6222fff-0241-4d22-b985-de311d9dcd17-kube-api-access-pjz9f" (OuterVolumeSpecName: "kube-api-access-pjz9f") pod "b6222fff-0241-4d22-b985-de311d9dcd17" (UID: "b6222fff-0241-4d22-b985-de311d9dcd17"). InnerVolumeSpecName "kube-api-access-pjz9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.444961 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ceph" (OuterVolumeSpecName: "ceph") pod "b6222fff-0241-4d22-b985-de311d9dcd17" (UID: "b6222fff-0241-4d22-b985-de311d9dcd17"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.460734 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b6222fff-0241-4d22-b985-de311d9dcd17" (UID: "b6222fff-0241-4d22-b985-de311d9dcd17"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.467554 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-inventory" (OuterVolumeSpecName: "inventory") pod "b6222fff-0241-4d22-b985-de311d9dcd17" (UID: "b6222fff-0241-4d22-b985-de311d9dcd17"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.532656 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.532689 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjz9f\" (UniqueName: \"kubernetes.io/projected/b6222fff-0241-4d22-b985-de311d9dcd17-kube-api-access-pjz9f\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.532702 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.532710 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6222fff-0241-4d22-b985-de311d9dcd17-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.830113 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" event={"ID":"b6222fff-0241-4d22-b985-de311d9dcd17","Type":"ContainerDied","Data":"6911cd9d76e79a0f79ce7cf1c7e86ef28ff79062f3123bb00813f13d89c21040"} Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.830173 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6911cd9d76e79a0f79ce7cf1c7e86ef28ff79062f3123bb00813f13d89c21040" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.830550 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-xv47l" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.927143 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-x7s87"] Nov 26 09:16:22 crc kubenswrapper[4940]: E1126 09:16:22.927836 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6222fff-0241-4d22-b985-de311d9dcd17" containerName="configure-network-openstack-openstack-cell1" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.927962 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6222fff-0241-4d22-b985-de311d9dcd17" containerName="configure-network-openstack-openstack-cell1" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.928303 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6222fff-0241-4d22-b985-de311d9dcd17" containerName="configure-network-openstack-openstack-cell1" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.930477 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.935384 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.935721 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:16:22 crc kubenswrapper[4940]: I1126 09:16:22.940336 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-x7s87"] Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.042512 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-inventory\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.042570 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqccd\" (UniqueName: \"kubernetes.io/projected/1fc41899-ad48-4759-8e0b-0f9108597ca3-kube-api-access-kqccd\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.042664 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ssh-key\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.042738 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ceph\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.145330 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ssh-key\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.145511 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ceph\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.145742 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-inventory\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " 
pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.145853 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqccd\" (UniqueName: \"kubernetes.io/projected/1fc41899-ad48-4759-8e0b-0f9108597ca3-kube-api-access-kqccd\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.153069 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-inventory\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.155854 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ssh-key\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.170144 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ceph\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.182334 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqccd\" (UniqueName: \"kubernetes.io/projected/1fc41899-ad48-4759-8e0b-0f9108597ca3-kube-api-access-kqccd\") pod \"validate-network-openstack-openstack-cell1-x7s87\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.255565 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:23 crc kubenswrapper[4940]: I1126 09:16:23.872393 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-x7s87"] Nov 26 09:16:24 crc kubenswrapper[4940]: I1126 09:16:24.848886 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" event={"ID":"1fc41899-ad48-4759-8e0b-0f9108597ca3","Type":"ContainerStarted","Data":"d3471afa29b0eb5bf63637ad5e682a0b9e028cd2252e5d25f08847b7fdf875c4"} Nov 26 09:16:24 crc kubenswrapper[4940]: I1126 09:16:24.849222 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" event={"ID":"1fc41899-ad48-4759-8e0b-0f9108597ca3","Type":"ContainerStarted","Data":"c36766e8facd4065426b66ea9918e3c8394a9e262e47d3ab8e01177f85ed0c90"} Nov 26 09:16:24 crc kubenswrapper[4940]: I1126 09:16:24.871636 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" podStartSLOduration=2.227877133 podStartE2EDuration="2.871609048s" podCreationTimestamp="2025-11-26 09:16:22 +0000 UTC" firstStartedPulling="2025-11-26 09:16:23.87015701 +0000 UTC m=+8485.390298629" lastFinishedPulling="2025-11-26 09:16:24.513888915 +0000 UTC m=+8486.034030544" observedRunningTime="2025-11-26 09:16:24.87136998 +0000 UTC m=+8486.391511609" watchObservedRunningTime="2025-11-26 09:16:24.871609048 +0000 UTC m=+8486.391750667" Nov 26 09:16:29 crc kubenswrapper[4940]: I1126 09:16:29.917373 4940 generic.go:334] "Generic (PLEG): container finished" podID="1fc41899-ad48-4759-8e0b-0f9108597ca3" containerID="d3471afa29b0eb5bf63637ad5e682a0b9e028cd2252e5d25f08847b7fdf875c4" exitCode=0 Nov 26 09:16:29 crc kubenswrapper[4940]: I1126 09:16:29.917442 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" event={"ID":"1fc41899-ad48-4759-8e0b-0f9108597ca3","Type":"ContainerDied","Data":"d3471afa29b0eb5bf63637ad5e682a0b9e028cd2252e5d25f08847b7fdf875c4"} Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.450278 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.530862 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ssh-key\") pod \"1fc41899-ad48-4759-8e0b-0f9108597ca3\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.530982 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqccd\" (UniqueName: \"kubernetes.io/projected/1fc41899-ad48-4759-8e0b-0f9108597ca3-kube-api-access-kqccd\") pod \"1fc41899-ad48-4759-8e0b-0f9108597ca3\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.531153 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-inventory\") pod \"1fc41899-ad48-4759-8e0b-0f9108597ca3\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.531179 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ceph\") pod \"1fc41899-ad48-4759-8e0b-0f9108597ca3\" (UID: \"1fc41899-ad48-4759-8e0b-0f9108597ca3\") " Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.536774 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ceph" (OuterVolumeSpecName: "ceph") pod "1fc41899-ad48-4759-8e0b-0f9108597ca3" (UID: "1fc41899-ad48-4759-8e0b-0f9108597ca3"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.539122 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fc41899-ad48-4759-8e0b-0f9108597ca3-kube-api-access-kqccd" (OuterVolumeSpecName: "kube-api-access-kqccd") pod "1fc41899-ad48-4759-8e0b-0f9108597ca3" (UID: "1fc41899-ad48-4759-8e0b-0f9108597ca3"). InnerVolumeSpecName "kube-api-access-kqccd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.565886 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-inventory" (OuterVolumeSpecName: "inventory") pod "1fc41899-ad48-4759-8e0b-0f9108597ca3" (UID: "1fc41899-ad48-4759-8e0b-0f9108597ca3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.571513 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1fc41899-ad48-4759-8e0b-0f9108597ca3" (UID: "1fc41899-ad48-4759-8e0b-0f9108597ca3"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.633707 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.633758 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.633776 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1fc41899-ad48-4759-8e0b-0f9108597ca3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.633795 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqccd\" (UniqueName: \"kubernetes.io/projected/1fc41899-ad48-4759-8e0b-0f9108597ca3-kube-api-access-kqccd\") on node \"crc\" DevicePath \"\"" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.946955 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" event={"ID":"1fc41899-ad48-4759-8e0b-0f9108597ca3","Type":"ContainerDied","Data":"c36766e8facd4065426b66ea9918e3c8394a9e262e47d3ab8e01177f85ed0c90"} Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.947478 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c36766e8facd4065426b66ea9918e3c8394a9e262e47d3ab8e01177f85ed0c90" Nov 26 09:16:31 crc kubenswrapper[4940]: I1126 09:16:31.947029 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-x7s87" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.027248 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-2sgj8"] Nov 26 09:16:32 crc kubenswrapper[4940]: E1126 09:16:32.027749 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fc41899-ad48-4759-8e0b-0f9108597ca3" containerName="validate-network-openstack-openstack-cell1" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.027767 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fc41899-ad48-4759-8e0b-0f9108597ca3" containerName="validate-network-openstack-openstack-cell1" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.027981 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fc41899-ad48-4759-8e0b-0f9108597ca3" containerName="validate-network-openstack-openstack-cell1" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.028767 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.031453 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.031952 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.041502 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-2sgj8"] Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.144232 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-inventory\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.144486 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ssh-key\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.144539 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ceph\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.144589 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95x88\" (UniqueName: \"kubernetes.io/projected/146c140b-7adb-4641-b621-e8bd5f3bcb3c-kube-api-access-95x88\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.246763 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-inventory\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.246960 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ssh-key\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.247004 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ceph\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc 
kubenswrapper[4940]: I1126 09:16:32.247075 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95x88\" (UniqueName: \"kubernetes.io/projected/146c140b-7adb-4641-b621-e8bd5f3bcb3c-kube-api-access-95x88\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.250542 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-inventory\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.251389 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ssh-key\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.258516 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ceph\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.268332 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95x88\" (UniqueName: \"kubernetes.io/projected/146c140b-7adb-4641-b621-e8bd5f3bcb3c-kube-api-access-95x88\") pod \"install-os-openstack-openstack-cell1-2sgj8\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.354715 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:16:32 crc kubenswrapper[4940]: I1126 09:16:32.952814 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-2sgj8"] Nov 26 09:16:32 crc kubenswrapper[4940]: W1126 09:16:32.956107 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod146c140b_7adb_4641_b621_e8bd5f3bcb3c.slice/crio-ad472c9216e6c08f7d4bb2b1fc3c8acdc631b5eb2352a1c9c93d742c029858b0 WatchSource:0}: Error finding container ad472c9216e6c08f7d4bb2b1fc3c8acdc631b5eb2352a1c9c93d742c029858b0: Status 404 returned error can't find the container with id ad472c9216e6c08f7d4bb2b1fc3c8acdc631b5eb2352a1c9c93d742c029858b0 Nov 26 09:16:33 crc kubenswrapper[4940]: I1126 09:16:33.967945 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" event={"ID":"146c140b-7adb-4641-b621-e8bd5f3bcb3c","Type":"ContainerStarted","Data":"028f622f7fb2270eda1ba56a0f6f9d8949443f1192fb3f7edf04a39335d9fc88"} Nov 26 09:16:33 crc kubenswrapper[4940]: I1126 09:16:33.968229 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" event={"ID":"146c140b-7adb-4641-b621-e8bd5f3bcb3c","Type":"ContainerStarted","Data":"ad472c9216e6c08f7d4bb2b1fc3c8acdc631b5eb2352a1c9c93d742c029858b0"} Nov 26 09:16:33 crc kubenswrapper[4940]: I1126 09:16:33.997564 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" podStartSLOduration=2.288743106 podStartE2EDuration="2.997530209s" podCreationTimestamp="2025-11-26 09:16:31 +0000 UTC" firstStartedPulling="2025-11-26 09:16:32.961441791 +0000 UTC m=+8494.481583420" lastFinishedPulling="2025-11-26 09:16:33.670228894 +0000 UTC m=+8495.190370523" observedRunningTime="2025-11-26 09:16:33.987577812 +0000 UTC m=+8495.507719431" watchObservedRunningTime="2025-11-26 09:16:33.997530209 +0000 UTC m=+8495.517671838" Nov 26 09:17:09 crc kubenswrapper[4940]: I1126 09:17:09.379215 4940 generic.go:334] "Generic (PLEG): container finished" podID="84b4be12-9142-400e-b21b-2cdd5263b101" containerID="3f3fff51e3e93523ce3e116afa198dbcfb233dfa972ea0cf6c1c8f3fec69db00" exitCode=0 Nov 26 09:17:09 crc kubenswrapper[4940]: I1126 09:17:09.379299 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-networker-r7r5p" event={"ID":"84b4be12-9142-400e-b21b-2cdd5263b101","Type":"ContainerDied","Data":"3f3fff51e3e93523ce3e116afa198dbcfb233dfa972ea0cf6c1c8f3fec69db00"} Nov 26 09:17:10 crc kubenswrapper[4940]: I1126 09:17:10.961275 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.064708 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b44pb\" (UniqueName: \"kubernetes.io/projected/84b4be12-9142-400e-b21b-2cdd5263b101-kube-api-access-b44pb\") pod \"84b4be12-9142-400e-b21b-2cdd5263b101\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.064798 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-ssh-key\") pod \"84b4be12-9142-400e-b21b-2cdd5263b101\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.065163 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-inventory\") pod \"84b4be12-9142-400e-b21b-2cdd5263b101\" (UID: \"84b4be12-9142-400e-b21b-2cdd5263b101\") " Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.084465 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84b4be12-9142-400e-b21b-2cdd5263b101-kube-api-access-b44pb" (OuterVolumeSpecName: "kube-api-access-b44pb") pod "84b4be12-9142-400e-b21b-2cdd5263b101" (UID: "84b4be12-9142-400e-b21b-2cdd5263b101"). InnerVolumeSpecName "kube-api-access-b44pb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.099538 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "84b4be12-9142-400e-b21b-2cdd5263b101" (UID: "84b4be12-9142-400e-b21b-2cdd5263b101"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.102816 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-inventory" (OuterVolumeSpecName: "inventory") pod "84b4be12-9142-400e-b21b-2cdd5263b101" (UID: "84b4be12-9142-400e-b21b-2cdd5263b101"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.167356 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.167746 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b44pb\" (UniqueName: \"kubernetes.io/projected/84b4be12-9142-400e-b21b-2cdd5263b101-kube-api-access-b44pb\") on node \"crc\" DevicePath \"\"" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.167839 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84b4be12-9142-400e-b21b-2cdd5263b101-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.410851 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-networker-r7r5p" event={"ID":"84b4be12-9142-400e-b21b-2cdd5263b101","Type":"ContainerDied","Data":"42259d3cc6870da5ba5a257e5dbd852145d551a95d2366a67fa190befe53b17b"} Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.410902 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42259d3cc6870da5ba5a257e5dbd852145d551a95d2366a67fa190befe53b17b" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.410923 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-networker-r7r5p" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.517774 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-networker-vg4mh"] Nov 26 09:17:11 crc kubenswrapper[4940]: E1126 09:17:11.518237 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84b4be12-9142-400e-b21b-2cdd5263b101" containerName="install-os-openstack-openstack-networker" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.518255 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="84b4be12-9142-400e-b21b-2cdd5263b101" containerName="install-os-openstack-openstack-networker" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.518501 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="84b4be12-9142-400e-b21b-2cdd5263b101" containerName="install-os-openstack-openstack-networker" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.519225 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.524711 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.525344 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.534746 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-networker-vg4mh"] Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.675943 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-ssh-key\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.676013 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmg59\" (UniqueName: \"kubernetes.io/projected/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-kube-api-access-nmg59\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.676441 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-inventory\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.778123 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-ssh-key\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.778166 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmg59\" (UniqueName: \"kubernetes.io/projected/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-kube-api-access-nmg59\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.778279 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-inventory\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.782775 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-ssh-key\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: 
\"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.783102 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-inventory\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.802675 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmg59\" (UniqueName: \"kubernetes.io/projected/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-kube-api-access-nmg59\") pod \"configure-os-openstack-openstack-networker-vg4mh\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:11 crc kubenswrapper[4940]: I1126 09:17:11.843544 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:17:12 crc kubenswrapper[4940]: I1126 09:17:12.413340 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-networker-vg4mh"] Nov 26 09:17:13 crc kubenswrapper[4940]: I1126 09:17:13.446061 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" event={"ID":"c49828fa-6d5e-4808-a2b7-10cc1fecaa64","Type":"ContainerStarted","Data":"cdb9d15d5ac59480af480c9846fc33f7635bc9b2d8d91e2c80fa05a8e2c313d5"} Nov 26 09:17:14 crc kubenswrapper[4940]: I1126 09:17:14.461169 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" event={"ID":"c49828fa-6d5e-4808-a2b7-10cc1fecaa64","Type":"ContainerStarted","Data":"559663d2fd2c65311c8510d6cbd8f5dac70563f2b71154a915b3de24b264fa75"} Nov 26 09:17:14 crc kubenswrapper[4940]: I1126 09:17:14.478022 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" podStartSLOduration=2.687991585 podStartE2EDuration="3.47800234s" podCreationTimestamp="2025-11-26 09:17:11 +0000 UTC" firstStartedPulling="2025-11-26 09:17:12.42738034 +0000 UTC m=+8533.947521959" lastFinishedPulling="2025-11-26 09:17:13.217391095 +0000 UTC m=+8534.737532714" observedRunningTime="2025-11-26 09:17:14.475913184 +0000 UTC m=+8535.996054803" watchObservedRunningTime="2025-11-26 09:17:14.47800234 +0000 UTC m=+8535.998143959" Nov 26 09:17:21 crc kubenswrapper[4940]: I1126 09:17:21.728789 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:17:21 crc kubenswrapper[4940]: I1126 09:17:21.729444 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:17:22 crc kubenswrapper[4940]: I1126 09:17:22.556114 4940 generic.go:334] "Generic (PLEG): container finished" 
podID="146c140b-7adb-4641-b621-e8bd5f3bcb3c" containerID="028f622f7fb2270eda1ba56a0f6f9d8949443f1192fb3f7edf04a39335d9fc88" exitCode=0 Nov 26 09:17:22 crc kubenswrapper[4940]: I1126 09:17:22.556432 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" event={"ID":"146c140b-7adb-4641-b621-e8bd5f3bcb3c","Type":"ContainerDied","Data":"028f622f7fb2270eda1ba56a0f6f9d8949443f1192fb3f7edf04a39335d9fc88"} Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.201771 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.372933 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ssh-key\") pod \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.372978 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-inventory\") pod \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.373082 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95x88\" (UniqueName: \"kubernetes.io/projected/146c140b-7adb-4641-b621-e8bd5f3bcb3c-kube-api-access-95x88\") pod \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.373238 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ceph\") pod \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\" (UID: \"146c140b-7adb-4641-b621-e8bd5f3bcb3c\") " Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.378233 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ceph" (OuterVolumeSpecName: "ceph") pod "146c140b-7adb-4641-b621-e8bd5f3bcb3c" (UID: "146c140b-7adb-4641-b621-e8bd5f3bcb3c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.381602 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/146c140b-7adb-4641-b621-e8bd5f3bcb3c-kube-api-access-95x88" (OuterVolumeSpecName: "kube-api-access-95x88") pod "146c140b-7adb-4641-b621-e8bd5f3bcb3c" (UID: "146c140b-7adb-4641-b621-e8bd5f3bcb3c"). InnerVolumeSpecName "kube-api-access-95x88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.404296 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-inventory" (OuterVolumeSpecName: "inventory") pod "146c140b-7adb-4641-b621-e8bd5f3bcb3c" (UID: "146c140b-7adb-4641-b621-e8bd5f3bcb3c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.414176 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "146c140b-7adb-4641-b621-e8bd5f3bcb3c" (UID: "146c140b-7adb-4641-b621-e8bd5f3bcb3c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.475478 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.475682 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.475739 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c140b-7adb-4641-b621-e8bd5f3bcb3c-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.475821 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95x88\" (UniqueName: \"kubernetes.io/projected/146c140b-7adb-4641-b621-e8bd5f3bcb3c-kube-api-access-95x88\") on node \"crc\" DevicePath \"\"" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.585754 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" event={"ID":"146c140b-7adb-4641-b621-e8bd5f3bcb3c","Type":"ContainerDied","Data":"ad472c9216e6c08f7d4bb2b1fc3c8acdc631b5eb2352a1c9c93d742c029858b0"} Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.585804 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad472c9216e6c08f7d4bb2b1fc3c8acdc631b5eb2352a1c9c93d742c029858b0" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.585906 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-2sgj8" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.657453 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-ztjn5"] Nov 26 09:17:24 crc kubenswrapper[4940]: E1126 09:17:24.657935 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="146c140b-7adb-4641-b621-e8bd5f3bcb3c" containerName="install-os-openstack-openstack-cell1" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.657958 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="146c140b-7adb-4641-b621-e8bd5f3bcb3c" containerName="install-os-openstack-openstack-cell1" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.658252 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="146c140b-7adb-4641-b621-e8bd5f3bcb3c" containerName="install-os-openstack-openstack-cell1" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.659000 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.660983 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.661292 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.668287 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-ztjn5"] Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.781981 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-inventory\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.782063 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ssh-key\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.782531 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gw9s\" (UniqueName: \"kubernetes.io/projected/02107190-e269-4d46-a669-6b73512247fa-kube-api-access-6gw9s\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.782844 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ceph\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.884635 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-inventory\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.884721 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ssh-key\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.884975 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gw9s\" (UniqueName: \"kubernetes.io/projected/02107190-e269-4d46-a669-6b73512247fa-kube-api-access-6gw9s\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " 
pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.885127 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ceph\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.888547 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ssh-key\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.892793 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-inventory\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.895969 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ceph\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.913832 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gw9s\" (UniqueName: \"kubernetes.io/projected/02107190-e269-4d46-a669-6b73512247fa-kube-api-access-6gw9s\") pod \"configure-os-openstack-openstack-cell1-ztjn5\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:24 crc kubenswrapper[4940]: I1126 09:17:24.979669 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:17:25 crc kubenswrapper[4940]: I1126 09:17:25.551037 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-ztjn5"] Nov 26 09:17:25 crc kubenswrapper[4940]: I1126 09:17:25.599204 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" event={"ID":"02107190-e269-4d46-a669-6b73512247fa","Type":"ContainerStarted","Data":"20f2bc2c2a49090b236d9b32f25a5c4ca513590dc309b937cc44916ce062d83c"} Nov 26 09:17:26 crc kubenswrapper[4940]: I1126 09:17:26.620624 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" event={"ID":"02107190-e269-4d46-a669-6b73512247fa","Type":"ContainerStarted","Data":"8a479f0f728229e9642d374c22c428c02be965174df5f891a632192e9920937e"} Nov 26 09:17:26 crc kubenswrapper[4940]: I1126 09:17:26.658283 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" podStartSLOduration=2.135550696 podStartE2EDuration="2.658265493s" podCreationTimestamp="2025-11-26 09:17:24 +0000 UTC" firstStartedPulling="2025-11-26 09:17:25.538294318 +0000 UTC m=+8547.058435937" lastFinishedPulling="2025-11-26 09:17:26.061009115 +0000 UTC m=+8547.581150734" observedRunningTime="2025-11-26 09:17:26.643850575 +0000 UTC m=+8548.163992194" watchObservedRunningTime="2025-11-26 09:17:26.658265493 +0000 UTC m=+8548.178407112" Nov 26 09:17:51 crc kubenswrapper[4940]: I1126 09:17:51.728755 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:17:51 crc kubenswrapper[4940]: I1126 09:17:51.729294 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:18:08 crc kubenswrapper[4940]: I1126 09:18:08.062984 4940 generic.go:334] "Generic (PLEG): container finished" podID="c49828fa-6d5e-4808-a2b7-10cc1fecaa64" containerID="559663d2fd2c65311c8510d6cbd8f5dac70563f2b71154a915b3de24b264fa75" exitCode=0 Nov 26 09:18:08 crc kubenswrapper[4940]: I1126 09:18:08.063435 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" event={"ID":"c49828fa-6d5e-4808-a2b7-10cc1fecaa64","Type":"ContainerDied","Data":"559663d2fd2c65311c8510d6cbd8f5dac70563f2b71154a915b3de24b264fa75"} Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.687397 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.854135 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmg59\" (UniqueName: \"kubernetes.io/projected/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-kube-api-access-nmg59\") pod \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.854273 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-ssh-key\") pod \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.854332 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-inventory\") pod \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\" (UID: \"c49828fa-6d5e-4808-a2b7-10cc1fecaa64\") " Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.862356 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-kube-api-access-nmg59" (OuterVolumeSpecName: "kube-api-access-nmg59") pod "c49828fa-6d5e-4808-a2b7-10cc1fecaa64" (UID: "c49828fa-6d5e-4808-a2b7-10cc1fecaa64"). InnerVolumeSpecName "kube-api-access-nmg59". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.888194 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-inventory" (OuterVolumeSpecName: "inventory") pod "c49828fa-6d5e-4808-a2b7-10cc1fecaa64" (UID: "c49828fa-6d5e-4808-a2b7-10cc1fecaa64"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.891868 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c49828fa-6d5e-4808-a2b7-10cc1fecaa64" (UID: "c49828fa-6d5e-4808-a2b7-10cc1fecaa64"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.956697 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmg59\" (UniqueName: \"kubernetes.io/projected/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-kube-api-access-nmg59\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.956739 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:09 crc kubenswrapper[4940]: I1126 09:18:09.956752 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c49828fa-6d5e-4808-a2b7-10cc1fecaa64-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.086673 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" event={"ID":"c49828fa-6d5e-4808-a2b7-10cc1fecaa64","Type":"ContainerDied","Data":"cdb9d15d5ac59480af480c9846fc33f7635bc9b2d8d91e2c80fa05a8e2c313d5"} Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.086724 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-networker-vg4mh" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.086728 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cdb9d15d5ac59480af480c9846fc33f7635bc9b2d8d91e2c80fa05a8e2c313d5" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.236084 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-networker-csmg9"] Nov 26 09:18:10 crc kubenswrapper[4940]: E1126 09:18:10.236501 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c49828fa-6d5e-4808-a2b7-10cc1fecaa64" containerName="configure-os-openstack-openstack-networker" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.236520 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c49828fa-6d5e-4808-a2b7-10cc1fecaa64" containerName="configure-os-openstack-openstack-networker" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.236748 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c49828fa-6d5e-4808-a2b7-10cc1fecaa64" containerName="configure-os-openstack-openstack-networker" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.237618 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.241272 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.244031 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.255672 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-networker-csmg9"] Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.367759 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-inventory\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.368801 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdlt6\" (UniqueName: \"kubernetes.io/projected/c228e8ac-e24b-4d47-a741-e29048110c53-kube-api-access-cdlt6\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.369053 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-ssh-key\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.470925 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-ssh-key\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.470985 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-inventory\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.471152 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdlt6\" (UniqueName: \"kubernetes.io/projected/c228e8ac-e24b-4d47-a741-e29048110c53-kube-api-access-cdlt6\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.477703 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-inventory\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 
09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.479536 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-ssh-key\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.488572 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdlt6\" (UniqueName: \"kubernetes.io/projected/c228e8ac-e24b-4d47-a741-e29048110c53-kube-api-access-cdlt6\") pod \"run-os-openstack-openstack-networker-csmg9\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:10 crc kubenswrapper[4940]: I1126 09:18:10.559174 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:11 crc kubenswrapper[4940]: I1126 09:18:11.225597 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-networker-csmg9"] Nov 26 09:18:12 crc kubenswrapper[4940]: I1126 09:18:12.122802 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-networker-csmg9" event={"ID":"c228e8ac-e24b-4d47-a741-e29048110c53","Type":"ContainerStarted","Data":"9aa99acdfe1e7b5b4fb805214988182d980b194157f58b538601f1b2be5eb2da"} Nov 26 09:18:13 crc kubenswrapper[4940]: I1126 09:18:13.141317 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-networker-csmg9" event={"ID":"c228e8ac-e24b-4d47-a741-e29048110c53","Type":"ContainerStarted","Data":"e27fbc3b46d5d5ccb938e0d3a0ac7be6efed8fb6c0f43a79c1c13c373d5108de"} Nov 26 09:18:16 crc kubenswrapper[4940]: I1126 09:18:16.178289 4940 generic.go:334] "Generic (PLEG): container finished" podID="02107190-e269-4d46-a669-6b73512247fa" containerID="8a479f0f728229e9642d374c22c428c02be965174df5f891a632192e9920937e" exitCode=0 Nov 26 09:18:16 crc kubenswrapper[4940]: I1126 09:18:16.178386 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" event={"ID":"02107190-e269-4d46-a669-6b73512247fa","Type":"ContainerDied","Data":"8a479f0f728229e9642d374c22c428c02be965174df5f891a632192e9920937e"} Nov 26 09:18:16 crc kubenswrapper[4940]: I1126 09:18:16.208080 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-networker-csmg9" podStartSLOduration=5.552468096 podStartE2EDuration="6.208027937s" podCreationTimestamp="2025-11-26 09:18:10 +0000 UTC" firstStartedPulling="2025-11-26 09:18:11.211846223 +0000 UTC m=+8592.731987842" lastFinishedPulling="2025-11-26 09:18:11.867406064 +0000 UTC m=+8593.387547683" observedRunningTime="2025-11-26 09:18:13.173973701 +0000 UTC m=+8594.694115320" watchObservedRunningTime="2025-11-26 09:18:16.208027937 +0000 UTC m=+8597.728169566" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.720631 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.889892 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-inventory\") pod \"02107190-e269-4d46-a669-6b73512247fa\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.890016 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ceph\") pod \"02107190-e269-4d46-a669-6b73512247fa\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.890117 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ssh-key\") pod \"02107190-e269-4d46-a669-6b73512247fa\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.890378 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gw9s\" (UniqueName: \"kubernetes.io/projected/02107190-e269-4d46-a669-6b73512247fa-kube-api-access-6gw9s\") pod \"02107190-e269-4d46-a669-6b73512247fa\" (UID: \"02107190-e269-4d46-a669-6b73512247fa\") " Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.895770 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02107190-e269-4d46-a669-6b73512247fa-kube-api-access-6gw9s" (OuterVolumeSpecName: "kube-api-access-6gw9s") pod "02107190-e269-4d46-a669-6b73512247fa" (UID: "02107190-e269-4d46-a669-6b73512247fa"). InnerVolumeSpecName "kube-api-access-6gw9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.896721 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ceph" (OuterVolumeSpecName: "ceph") pod "02107190-e269-4d46-a669-6b73512247fa" (UID: "02107190-e269-4d46-a669-6b73512247fa"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.925535 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "02107190-e269-4d46-a669-6b73512247fa" (UID: "02107190-e269-4d46-a669-6b73512247fa"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.946026 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-inventory" (OuterVolumeSpecName: "inventory") pod "02107190-e269-4d46-a669-6b73512247fa" (UID: "02107190-e269-4d46-a669-6b73512247fa"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.993697 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gw9s\" (UniqueName: \"kubernetes.io/projected/02107190-e269-4d46-a669-6b73512247fa-kube-api-access-6gw9s\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.993748 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.993766 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:17 crc kubenswrapper[4940]: I1126 09:18:17.993781 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02107190-e269-4d46-a669-6b73512247fa-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.198754 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" event={"ID":"02107190-e269-4d46-a669-6b73512247fa","Type":"ContainerDied","Data":"20f2bc2c2a49090b236d9b32f25a5c4ca513590dc309b937cc44916ce062d83c"} Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.198804 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20f2bc2c2a49090b236d9b32f25a5c4ca513590dc309b937cc44916ce062d83c" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.198862 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-ztjn5" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.312008 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-452hj"] Nov 26 09:18:18 crc kubenswrapper[4940]: E1126 09:18:18.312727 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02107190-e269-4d46-a669-6b73512247fa" containerName="configure-os-openstack-openstack-cell1" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.312762 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="02107190-e269-4d46-a669-6b73512247fa" containerName="configure-os-openstack-openstack-cell1" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.313149 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="02107190-e269-4d46-a669-6b73512247fa" containerName="configure-os-openstack-openstack-cell1" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.314283 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.316829 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.319856 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.324757 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-452hj"] Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.503681 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-networker\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-networker\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.503926 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-1\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.503948 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg2x8\" (UniqueName: \"kubernetes.io/projected/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-kube-api-access-wg2x8\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.503975 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-0\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.503996 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ceph\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.504087 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.606191 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-networker\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-networker\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 
09:18:18.606250 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-1\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.606290 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg2x8\" (UniqueName: \"kubernetes.io/projected/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-kube-api-access-wg2x8\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.606337 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-0\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.606373 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ceph\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.606533 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.615153 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-0\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.616120 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.616579 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-1\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.618157 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-networker\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-networker\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.621678 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ceph\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.645855 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg2x8\" (UniqueName: \"kubernetes.io/projected/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-kube-api-access-wg2x8\") pod \"ssh-known-hosts-openstack-452hj\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:18 crc kubenswrapper[4940]: I1126 09:18:18.948661 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:19 crc kubenswrapper[4940]: I1126 09:18:19.540507 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-452hj"] Nov 26 09:18:20 crc kubenswrapper[4940]: I1126 09:18:20.224235 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-452hj" event={"ID":"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e","Type":"ContainerStarted","Data":"e5ee4deeceaa00a9fdc8094bef296e0dccd4eb25e06b1528ca5e04511bcc8cbf"} Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.242974 4940 generic.go:334] "Generic (PLEG): container finished" podID="c228e8ac-e24b-4d47-a741-e29048110c53" containerID="e27fbc3b46d5d5ccb938e0d3a0ac7be6efed8fb6c0f43a79c1c13c373d5108de" exitCode=0 Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.243105 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-networker-csmg9" event={"ID":"c228e8ac-e24b-4d47-a741-e29048110c53","Type":"ContainerDied","Data":"e27fbc3b46d5d5ccb938e0d3a0ac7be6efed8fb6c0f43a79c1c13c373d5108de"} Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.246555 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-452hj" event={"ID":"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e","Type":"ContainerStarted","Data":"10d1c616fc6059c9b2355325e070b117eea67ddb9e96310bd221d272b0d7ced8"} Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.291388 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-452hj" podStartSLOduration=2.775068467 podStartE2EDuration="3.291366501s" podCreationTimestamp="2025-11-26 09:18:18 +0000 UTC" firstStartedPulling="2025-11-26 09:18:19.540420446 +0000 UTC m=+8601.060562075" lastFinishedPulling="2025-11-26 09:18:20.05671847 +0000 UTC m=+8601.576860109" observedRunningTime="2025-11-26 09:18:21.276722715 +0000 UTC m=+8602.796864354" watchObservedRunningTime="2025-11-26 09:18:21.291366501 +0000 UTC m=+8602.811508120" Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.728355 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.728410 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.728446 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.729174 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9e9aff618bcec373fe2c25642a6939cbb5873fb50b14862a18a1ace3c3c4e20a"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 09:18:21 crc kubenswrapper[4940]: I1126 09:18:21.729227 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://9e9aff618bcec373fe2c25642a6939cbb5873fb50b14862a18a1ace3c3c4e20a" gracePeriod=600 Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.258735 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="9e9aff618bcec373fe2c25642a6939cbb5873fb50b14862a18a1ace3c3c4e20a" exitCode=0 Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.258813 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"9e9aff618bcec373fe2c25642a6939cbb5873fb50b14862a18a1ace3c3c4e20a"} Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.258965 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2"} Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.258992 4940 scope.go:117] "RemoveContainer" containerID="f5b018e0eef5edb23711d2bf43e2d65b77edfecec1526e590828535f5cff1902" Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.723394 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.901973 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-ssh-key\") pod \"c228e8ac-e24b-4d47-a741-e29048110c53\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.902225 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-inventory\") pod \"c228e8ac-e24b-4d47-a741-e29048110c53\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.902391 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdlt6\" (UniqueName: \"kubernetes.io/projected/c228e8ac-e24b-4d47-a741-e29048110c53-kube-api-access-cdlt6\") pod \"c228e8ac-e24b-4d47-a741-e29048110c53\" (UID: \"c228e8ac-e24b-4d47-a741-e29048110c53\") " Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.913616 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c228e8ac-e24b-4d47-a741-e29048110c53-kube-api-access-cdlt6" (OuterVolumeSpecName: "kube-api-access-cdlt6") pod "c228e8ac-e24b-4d47-a741-e29048110c53" (UID: "c228e8ac-e24b-4d47-a741-e29048110c53"). InnerVolumeSpecName "kube-api-access-cdlt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.938860 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c228e8ac-e24b-4d47-a741-e29048110c53" (UID: "c228e8ac-e24b-4d47-a741-e29048110c53"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:22 crc kubenswrapper[4940]: I1126 09:18:22.939187 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-inventory" (OuterVolumeSpecName: "inventory") pod "c228e8ac-e24b-4d47-a741-e29048110c53" (UID: "c228e8ac-e24b-4d47-a741-e29048110c53"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.005432 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdlt6\" (UniqueName: \"kubernetes.io/projected/c228e8ac-e24b-4d47-a741-e29048110c53-kube-api-access-cdlt6\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.005487 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.005504 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c228e8ac-e24b-4d47-a741-e29048110c53-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.271064 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-networker-csmg9" event={"ID":"c228e8ac-e24b-4d47-a741-e29048110c53","Type":"ContainerDied","Data":"9aa99acdfe1e7b5b4fb805214988182d980b194157f58b538601f1b2be5eb2da"} Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.272176 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9aa99acdfe1e7b5b4fb805214988182d980b194157f58b538601f1b2be5eb2da" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.271164 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-networker-csmg9" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.349580 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-networker-8ncnh"] Nov 26 09:18:23 crc kubenswrapper[4940]: E1126 09:18:23.350926 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c228e8ac-e24b-4d47-a741-e29048110c53" containerName="run-os-openstack-openstack-networker" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.351012 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c228e8ac-e24b-4d47-a741-e29048110c53" containerName="run-os-openstack-openstack-networker" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.351302 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c228e8ac-e24b-4d47-a741-e29048110c53" containerName="run-os-openstack-openstack-networker" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.352547 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.354886 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.364786 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-networker-8ncnh"] Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.515341 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-ssh-key\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.515429 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-inventory\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.515539 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5fxl\" (UniqueName: \"kubernetes.io/projected/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-kube-api-access-m5fxl\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.617801 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5fxl\" (UniqueName: \"kubernetes.io/projected/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-kube-api-access-m5fxl\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.617972 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-ssh-key\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.618055 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-inventory\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.623474 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-inventory\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.624648 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-ssh-key\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.644724 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5fxl\" (UniqueName: \"kubernetes.io/projected/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-kube-api-access-m5fxl\") pod \"reboot-os-openstack-openstack-networker-8ncnh\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:23 crc kubenswrapper[4940]: I1126 09:18:23.676311 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:24 crc kubenswrapper[4940]: W1126 09:18:24.217554 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d46bd9e_4b00_47c2_87ab_c82c54b3045b.slice/crio-18118236f5374004cbf9f925eb19f9f55140803fc7137c80e0ed902af4db77d8 WatchSource:0}: Error finding container 18118236f5374004cbf9f925eb19f9f55140803fc7137c80e0ed902af4db77d8: Status 404 returned error can't find the container with id 18118236f5374004cbf9f925eb19f9f55140803fc7137c80e0ed902af4db77d8 Nov 26 09:18:24 crc kubenswrapper[4940]: I1126 09:18:24.219027 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-networker-8ncnh"] Nov 26 09:18:24 crc kubenswrapper[4940]: I1126 09:18:24.288925 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" event={"ID":"6d46bd9e-4b00-47c2-87ab-c82c54b3045b","Type":"ContainerStarted","Data":"18118236f5374004cbf9f925eb19f9f55140803fc7137c80e0ed902af4db77d8"} Nov 26 09:18:25 crc kubenswrapper[4940]: I1126 09:18:25.302138 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" event={"ID":"6d46bd9e-4b00-47c2-87ab-c82c54b3045b","Type":"ContainerStarted","Data":"12c7cfa777d18fce2649a779fa5194b5bebaec0f136cea248a863fd449987d08"} Nov 26 09:18:25 crc kubenswrapper[4940]: I1126 09:18:25.329899 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" podStartSLOduration=1.818717859 podStartE2EDuration="2.329878108s" podCreationTimestamp="2025-11-26 09:18:23 +0000 UTC" firstStartedPulling="2025-11-26 09:18:24.220241053 +0000 UTC m=+8605.740382672" lastFinishedPulling="2025-11-26 09:18:24.731401292 +0000 UTC m=+8606.251542921" observedRunningTime="2025-11-26 09:18:25.320605754 +0000 UTC m=+8606.840747443" watchObservedRunningTime="2025-11-26 09:18:25.329878108 +0000 UTC m=+8606.850019737" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.096138 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f7jgg"] Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.106988 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.114824 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f7jgg"] Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.221309 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-catalog-content\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.221611 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-utilities\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.221732 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rctvh\" (UniqueName: \"kubernetes.io/projected/4bda4cba-ac15-4adb-97aa-31250c0fbf76-kube-api-access-rctvh\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.324734 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-utilities\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.324832 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rctvh\" (UniqueName: \"kubernetes.io/projected/4bda4cba-ac15-4adb-97aa-31250c0fbf76-kube-api-access-rctvh\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.324952 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-catalog-content\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.325186 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-utilities\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.325491 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-catalog-content\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.349883 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rctvh\" (UniqueName: \"kubernetes.io/projected/4bda4cba-ac15-4adb-97aa-31250c0fbf76-kube-api-access-rctvh\") pod \"certified-operators-f7jgg\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.482213 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:33 crc kubenswrapper[4940]: I1126 09:18:33.971121 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f7jgg"] Nov 26 09:18:34 crc kubenswrapper[4940]: I1126 09:18:34.407939 4940 generic.go:334] "Generic (PLEG): container finished" podID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerID="0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696" exitCode=0 Nov 26 09:18:34 crc kubenswrapper[4940]: I1126 09:18:34.408092 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7jgg" event={"ID":"4bda4cba-ac15-4adb-97aa-31250c0fbf76","Type":"ContainerDied","Data":"0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696"} Nov 26 09:18:34 crc kubenswrapper[4940]: I1126 09:18:34.408164 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7jgg" event={"ID":"4bda4cba-ac15-4adb-97aa-31250c0fbf76","Type":"ContainerStarted","Data":"7be3698ea34a6516253e49ee30c977d3cf4fc8af58afa90061c6c1646bf6f098"} Nov 26 09:18:35 crc kubenswrapper[4940]: I1126 09:18:35.419104 4940 generic.go:334] "Generic (PLEG): container finished" podID="621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" containerID="10d1c616fc6059c9b2355325e070b117eea67ddb9e96310bd221d272b0d7ced8" exitCode=0 Nov 26 09:18:35 crc kubenswrapper[4940]: I1126 09:18:35.419220 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-452hj" event={"ID":"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e","Type":"ContainerDied","Data":"10d1c616fc6059c9b2355325e070b117eea67ddb9e96310bd221d272b0d7ced8"} Nov 26 09:18:35 crc kubenswrapper[4940]: I1126 09:18:35.421592 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7jgg" event={"ID":"4bda4cba-ac15-4adb-97aa-31250c0fbf76","Type":"ContainerStarted","Data":"3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a"} Nov 26 09:18:36 crc kubenswrapper[4940]: I1126 09:18:36.438569 4940 generic.go:334] "Generic (PLEG): container finished" podID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerID="3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a" exitCode=0 Nov 26 09:18:36 crc kubenswrapper[4940]: I1126 09:18:36.438673 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7jgg" event={"ID":"4bda4cba-ac15-4adb-97aa-31250c0fbf76","Type":"ContainerDied","Data":"3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a"} Nov 26 09:18:36 crc kubenswrapper[4940]: I1126 09:18:36.913378 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.005817 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-cell1\") pod \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.005957 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-networker\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-networker\") pod \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.006018 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-0\") pod \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.006130 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-1\") pod \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.006158 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ceph\") pod \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.006277 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wg2x8\" (UniqueName: \"kubernetes.io/projected/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-kube-api-access-wg2x8\") pod \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\" (UID: \"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e\") " Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.013693 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-kube-api-access-wg2x8" (OuterVolumeSpecName: "kube-api-access-wg2x8") pod "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" (UID: "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e"). InnerVolumeSpecName "kube-api-access-wg2x8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.016340 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ceph" (OuterVolumeSpecName: "ceph") pod "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" (UID: "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.035275 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-1" (OuterVolumeSpecName: "inventory-1") pod "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" (UID: "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e"). InnerVolumeSpecName "inventory-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.038654 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" (UID: "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.052175 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" (UID: "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.060057 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-networker" (OuterVolumeSpecName: "ssh-key-openstack-networker") pod "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" (UID: "621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e"). InnerVolumeSpecName "ssh-key-openstack-networker". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.108441 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.109034 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-networker\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ssh-key-openstack-networker\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.109170 4940 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.109238 4940 reconciler_common.go:293] "Volume detached for volume \"inventory-1\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-inventory-1\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.109294 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.109346 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wg2x8\" (UniqueName: \"kubernetes.io/projected/621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e-kube-api-access-wg2x8\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.455474 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7jgg" event={"ID":"4bda4cba-ac15-4adb-97aa-31250c0fbf76","Type":"ContainerStarted","Data":"2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30"} Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.459987 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-452hj" 
event={"ID":"621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e","Type":"ContainerDied","Data":"e5ee4deeceaa00a9fdc8094bef296e0dccd4eb25e06b1528ca5e04511bcc8cbf"} Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.460027 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5ee4deeceaa00a9fdc8094bef296e0dccd4eb25e06b1528ca5e04511bcc8cbf" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.460121 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-452hj" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.501617 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f7jgg" podStartSLOduration=1.912954533 podStartE2EDuration="4.501588849s" podCreationTimestamp="2025-11-26 09:18:33 +0000 UTC" firstStartedPulling="2025-11-26 09:18:34.414341662 +0000 UTC m=+8615.934483281" lastFinishedPulling="2025-11-26 09:18:37.002975968 +0000 UTC m=+8618.523117597" observedRunningTime="2025-11-26 09:18:37.48116978 +0000 UTC m=+8619.001311399" watchObservedRunningTime="2025-11-26 09:18:37.501588849 +0000 UTC m=+8619.021730488" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.529063 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-tkxr6"] Nov 26 09:18:37 crc kubenswrapper[4940]: E1126 09:18:37.529659 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" containerName="ssh-known-hosts-openstack" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.529681 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" containerName="ssh-known-hosts-openstack" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.530009 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e" containerName="ssh-known-hosts-openstack" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.531238 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.533691 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.533699 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.540130 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-tkxr6"] Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.617758 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ceph\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.617805 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-inventory\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.617869 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkln4\" (UniqueName: \"kubernetes.io/projected/1e234172-c7fe-4989-b902-38458b50674f-kube-api-access-wkln4\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.617957 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ssh-key\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.719271 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ssh-key\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.719596 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ceph\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.719616 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-inventory\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.719675 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wkln4\" (UniqueName: \"kubernetes.io/projected/1e234172-c7fe-4989-b902-38458b50674f-kube-api-access-wkln4\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.724280 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ssh-key\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.727851 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-inventory\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.732698 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ceph\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.741291 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkln4\" (UniqueName: \"kubernetes.io/projected/1e234172-c7fe-4989-b902-38458b50674f-kube-api-access-wkln4\") pod \"run-os-openstack-openstack-cell1-tkxr6\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:37 crc kubenswrapper[4940]: I1126 09:18:37.867993 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:38 crc kubenswrapper[4940]: I1126 09:18:38.531905 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-tkxr6"] Nov 26 09:18:39 crc kubenswrapper[4940]: I1126 09:18:39.485181 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" event={"ID":"1e234172-c7fe-4989-b902-38458b50674f","Type":"ContainerStarted","Data":"aad0412d8f60a48f93a34fc97f5ffb1936db5f45ad2489005970e7e6ec1a567d"} Nov 26 09:18:40 crc kubenswrapper[4940]: I1126 09:18:40.496209 4940 generic.go:334] "Generic (PLEG): container finished" podID="6d46bd9e-4b00-47c2-87ab-c82c54b3045b" containerID="12c7cfa777d18fce2649a779fa5194b5bebaec0f136cea248a863fd449987d08" exitCode=0 Nov 26 09:18:40 crc kubenswrapper[4940]: I1126 09:18:40.496822 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" event={"ID":"6d46bd9e-4b00-47c2-87ab-c82c54b3045b","Type":"ContainerDied","Data":"12c7cfa777d18fce2649a779fa5194b5bebaec0f136cea248a863fd449987d08"} Nov 26 09:18:40 crc kubenswrapper[4940]: I1126 09:18:40.500240 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" event={"ID":"1e234172-c7fe-4989-b902-38458b50674f","Type":"ContainerStarted","Data":"fc77e568d84180426ac639f62e73766ca8facec6d143a2e1c2a5c890d4511319"} Nov 26 09:18:40 crc kubenswrapper[4940]: I1126 09:18:40.550793 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" podStartSLOduration=2.268379677 podStartE2EDuration="3.550749094s" podCreationTimestamp="2025-11-26 09:18:37 +0000 UTC" firstStartedPulling="2025-11-26 09:18:38.526531263 +0000 UTC m=+8620.046672882" lastFinishedPulling="2025-11-26 09:18:39.80890067 +0000 UTC m=+8621.329042299" observedRunningTime="2025-11-26 09:18:40.53803003 +0000 UTC m=+8622.058171659" watchObservedRunningTime="2025-11-26 09:18:40.550749094 +0000 UTC m=+8622.070890713" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.037804 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.132690 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-inventory\") pod \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.132823 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5fxl\" (UniqueName: \"kubernetes.io/projected/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-kube-api-access-m5fxl\") pod \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.132919 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-ssh-key\") pod \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\" (UID: \"6d46bd9e-4b00-47c2-87ab-c82c54b3045b\") " Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.138665 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-kube-api-access-m5fxl" (OuterVolumeSpecName: "kube-api-access-m5fxl") pod "6d46bd9e-4b00-47c2-87ab-c82c54b3045b" (UID: "6d46bd9e-4b00-47c2-87ab-c82c54b3045b"). InnerVolumeSpecName "kube-api-access-m5fxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.172075 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6d46bd9e-4b00-47c2-87ab-c82c54b3045b" (UID: "6d46bd9e-4b00-47c2-87ab-c82c54b3045b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.173194 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-inventory" (OuterVolumeSpecName: "inventory") pod "6d46bd9e-4b00-47c2-87ab-c82c54b3045b" (UID: "6d46bd9e-4b00-47c2-87ab-c82c54b3045b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.234999 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.235060 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5fxl\" (UniqueName: \"kubernetes.io/projected/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-kube-api-access-m5fxl\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.235074 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6d46bd9e-4b00-47c2-87ab-c82c54b3045b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.522418 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" event={"ID":"6d46bd9e-4b00-47c2-87ab-c82c54b3045b","Type":"ContainerDied","Data":"18118236f5374004cbf9f925eb19f9f55140803fc7137c80e0ed902af4db77d8"} Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.522462 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18118236f5374004cbf9f925eb19f9f55140803fc7137c80e0ed902af4db77d8" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.522541 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-networker-8ncnh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.655776 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-networker-sz9vh"] Nov 26 09:18:42 crc kubenswrapper[4940]: E1126 09:18:42.656347 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d46bd9e-4b00-47c2-87ab-c82c54b3045b" containerName="reboot-os-openstack-openstack-networker" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.656372 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d46bd9e-4b00-47c2-87ab-c82c54b3045b" containerName="reboot-os-openstack-openstack-networker" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.656696 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d46bd9e-4b00-47c2-87ab-c82c54b3045b" containerName="reboot-os-openstack-openstack-networker" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.657638 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.659871 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.660072 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.668708 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-networker-sz9vh"] Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.746673 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mlrh\" (UniqueName: \"kubernetes.io/projected/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-kube-api-access-8mlrh\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.746803 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-inventory\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.746833 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ssh-key\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.746863 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.747028 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.747501 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.849285 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.849338 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mlrh\" (UniqueName: \"kubernetes.io/projected/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-kube-api-access-8mlrh\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.849387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-inventory\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.849410 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ssh-key\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.849435 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.849472 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.854762 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-inventory\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.855616 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.855991 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.856219 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ssh-key\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.857777 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.881911 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mlrh\" (UniqueName: \"kubernetes.io/projected/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-kube-api-access-8mlrh\") pod \"install-certs-openstack-openstack-networker-sz9vh\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:42 crc kubenswrapper[4940]: I1126 09:18:42.977054 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:43 crc kubenswrapper[4940]: I1126 09:18:43.482387 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:43 crc kubenswrapper[4940]: I1126 09:18:43.485027 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:43 crc kubenswrapper[4940]: I1126 09:18:43.556905 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:43 crc kubenswrapper[4940]: I1126 09:18:43.557826 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-networker-sz9vh"] Nov 26 09:18:44 crc kubenswrapper[4940]: I1126 09:18:44.549556 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" event={"ID":"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c","Type":"ContainerStarted","Data":"eba255894030fc5d7fbc9f50296f4a396d32287e9f3a3c87d646e62e5939b626"} Nov 26 09:18:44 crc kubenswrapper[4940]: I1126 09:18:44.636228 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:44 crc kubenswrapper[4940]: I1126 09:18:44.716744 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f7jgg"] Nov 26 09:18:45 crc kubenswrapper[4940]: I1126 09:18:45.568623 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" event={"ID":"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c","Type":"ContainerStarted","Data":"174abc17495da4bb492bc68863fe8e6cded179ebdddbffd4588383e51ed65635"} Nov 26 
09:18:45 crc kubenswrapper[4940]: I1126 09:18:45.603637 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" podStartSLOduration=2.085727084 podStartE2EDuration="3.60360934s" podCreationTimestamp="2025-11-26 09:18:42 +0000 UTC" firstStartedPulling="2025-11-26 09:18:43.555967833 +0000 UTC m=+8625.076109452" lastFinishedPulling="2025-11-26 09:18:45.073850089 +0000 UTC m=+8626.593991708" observedRunningTime="2025-11-26 09:18:45.592295201 +0000 UTC m=+8627.112436860" watchObservedRunningTime="2025-11-26 09:18:45.60360934 +0000 UTC m=+8627.123750989" Nov 26 09:18:46 crc kubenswrapper[4940]: I1126 09:18:46.580116 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-f7jgg" podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerName="registry-server" containerID="cri-o://2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30" gracePeriod=2 Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.070210 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.171212 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rctvh\" (UniqueName: \"kubernetes.io/projected/4bda4cba-ac15-4adb-97aa-31250c0fbf76-kube-api-access-rctvh\") pod \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.171507 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-catalog-content\") pod \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.171536 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-utilities\") pod \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\" (UID: \"4bda4cba-ac15-4adb-97aa-31250c0fbf76\") " Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.172600 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-utilities" (OuterVolumeSpecName: "utilities") pod "4bda4cba-ac15-4adb-97aa-31250c0fbf76" (UID: "4bda4cba-ac15-4adb-97aa-31250c0fbf76"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.178136 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bda4cba-ac15-4adb-97aa-31250c0fbf76-kube-api-access-rctvh" (OuterVolumeSpecName: "kube-api-access-rctvh") pod "4bda4cba-ac15-4adb-97aa-31250c0fbf76" (UID: "4bda4cba-ac15-4adb-97aa-31250c0fbf76"). InnerVolumeSpecName "kube-api-access-rctvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.210806 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4bda4cba-ac15-4adb-97aa-31250c0fbf76" (UID: "4bda4cba-ac15-4adb-97aa-31250c0fbf76"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.274217 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.274259 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bda4cba-ac15-4adb-97aa-31250c0fbf76-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.274279 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rctvh\" (UniqueName: \"kubernetes.io/projected/4bda4cba-ac15-4adb-97aa-31250c0fbf76-kube-api-access-rctvh\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.593345 4940 generic.go:334] "Generic (PLEG): container finished" podID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerID="2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30" exitCode=0 Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.593397 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7jgg" event={"ID":"4bda4cba-ac15-4adb-97aa-31250c0fbf76","Type":"ContainerDied","Data":"2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30"} Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.593429 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f7jgg" event={"ID":"4bda4cba-ac15-4adb-97aa-31250c0fbf76","Type":"ContainerDied","Data":"7be3698ea34a6516253e49ee30c977d3cf4fc8af58afa90061c6c1646bf6f098"} Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.593449 4940 scope.go:117] "RemoveContainer" containerID="2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.593454 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f7jgg" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.634500 4940 scope.go:117] "RemoveContainer" containerID="3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.664623 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f7jgg"] Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.671075 4940 scope.go:117] "RemoveContainer" containerID="0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.674778 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f7jgg"] Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.730785 4940 scope.go:117] "RemoveContainer" containerID="2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30" Nov 26 09:18:47 crc kubenswrapper[4940]: E1126 09:18:47.731192 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30\": container with ID starting with 2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30 not found: ID does not exist" containerID="2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.731221 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30"} err="failed to get container status \"2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30\": rpc error: code = NotFound desc = could not find container \"2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30\": container with ID starting with 2111e39a7d74f157759e7603420ea7401e5fbb6aa14781091f499eef14341a30 not found: ID does not exist" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.731243 4940 scope.go:117] "RemoveContainer" containerID="3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a" Nov 26 09:18:47 crc kubenswrapper[4940]: E1126 09:18:47.731913 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a\": container with ID starting with 3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a not found: ID does not exist" containerID="3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.732064 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a"} err="failed to get container status \"3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a\": rpc error: code = NotFound desc = could not find container \"3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a\": container with ID starting with 3b75d0958e12f3152ef9c774f21972c7015351a01ce7bb4a55dbfa8581fa981a not found: ID does not exist" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.732209 4940 scope.go:117] "RemoveContainer" containerID="0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696" Nov 26 09:18:47 crc kubenswrapper[4940]: E1126 09:18:47.732703 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696\": container with ID starting with 0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696 not found: ID does not exist" containerID="0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696" Nov 26 09:18:47 crc kubenswrapper[4940]: I1126 09:18:47.732724 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696"} err="failed to get container status \"0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696\": rpc error: code = NotFound desc = could not find container \"0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696\": container with ID starting with 0981adf7dfc8293002694c05bae7a3a54aff998055c217427ace522ef18e8696 not found: ID does not exist" Nov 26 09:18:49 crc kubenswrapper[4940]: I1126 09:18:49.178489 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" path="/var/lib/kubelet/pods/4bda4cba-ac15-4adb-97aa-31250c0fbf76/volumes" Nov 26 09:18:49 crc kubenswrapper[4940]: I1126 09:18:49.632744 4940 generic.go:334] "Generic (PLEG): container finished" podID="1e234172-c7fe-4989-b902-38458b50674f" containerID="fc77e568d84180426ac639f62e73766ca8facec6d143a2e1c2a5c890d4511319" exitCode=0 Nov 26 09:18:49 crc kubenswrapper[4940]: I1126 09:18:49.632793 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" event={"ID":"1e234172-c7fe-4989-b902-38458b50674f","Type":"ContainerDied","Data":"fc77e568d84180426ac639f62e73766ca8facec6d143a2e1c2a5c890d4511319"} Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.135561 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.195215 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ceph\") pod \"1e234172-c7fe-4989-b902-38458b50674f\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.195267 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkln4\" (UniqueName: \"kubernetes.io/projected/1e234172-c7fe-4989-b902-38458b50674f-kube-api-access-wkln4\") pod \"1e234172-c7fe-4989-b902-38458b50674f\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.195357 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ssh-key\") pod \"1e234172-c7fe-4989-b902-38458b50674f\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.195390 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-inventory\") pod \"1e234172-c7fe-4989-b902-38458b50674f\" (UID: \"1e234172-c7fe-4989-b902-38458b50674f\") " Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.202298 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ceph" (OuterVolumeSpecName: "ceph") pod "1e234172-c7fe-4989-b902-38458b50674f" (UID: "1e234172-c7fe-4989-b902-38458b50674f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.225896 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e234172-c7fe-4989-b902-38458b50674f-kube-api-access-wkln4" (OuterVolumeSpecName: "kube-api-access-wkln4") pod "1e234172-c7fe-4989-b902-38458b50674f" (UID: "1e234172-c7fe-4989-b902-38458b50674f"). InnerVolumeSpecName "kube-api-access-wkln4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.248620 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1e234172-c7fe-4989-b902-38458b50674f" (UID: "1e234172-c7fe-4989-b902-38458b50674f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.255345 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-inventory" (OuterVolumeSpecName: "inventory") pod "1e234172-c7fe-4989-b902-38458b50674f" (UID: "1e234172-c7fe-4989-b902-38458b50674f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.297287 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.297322 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.297332 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1e234172-c7fe-4989-b902-38458b50674f-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.297343 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkln4\" (UniqueName: \"kubernetes.io/projected/1e234172-c7fe-4989-b902-38458b50674f-kube-api-access-wkln4\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.671374 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" event={"ID":"1e234172-c7fe-4989-b902-38458b50674f","Type":"ContainerDied","Data":"aad0412d8f60a48f93a34fc97f5ffb1936db5f45ad2489005970e7e6ec1a567d"} Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.671422 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aad0412d8f60a48f93a34fc97f5ffb1936db5f45ad2489005970e7e6ec1a567d" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.671487 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-tkxr6" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.743580 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-v4djb"] Nov 26 09:18:51 crc kubenswrapper[4940]: E1126 09:18:51.744138 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerName="registry-server" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.744157 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerName="registry-server" Nov 26 09:18:51 crc kubenswrapper[4940]: E1126 09:18:51.744178 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerName="extract-utilities" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.744187 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerName="extract-utilities" Nov 26 09:18:51 crc kubenswrapper[4940]: E1126 09:18:51.744202 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e234172-c7fe-4989-b902-38458b50674f" containerName="run-os-openstack-openstack-cell1" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.744212 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e234172-c7fe-4989-b902-38458b50674f" containerName="run-os-openstack-openstack-cell1" Nov 26 09:18:51 crc kubenswrapper[4940]: E1126 09:18:51.744249 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerName="extract-content" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.744256 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerName="extract-content" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.744544 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e234172-c7fe-4989-b902-38458b50674f" containerName="run-os-openstack-openstack-cell1" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.744575 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bda4cba-ac15-4adb-97aa-31250c0fbf76" containerName="registry-server" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.745516 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.751502 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.751830 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.759232 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-v4djb"] Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.809843 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.809921 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ceph\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.810000 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jncr\" (UniqueName: \"kubernetes.io/projected/38de9225-517b-45c9-8f18-efad998ca841-kube-api-access-2jncr\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.810104 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-inventory\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.911607 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.911667 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ceph\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.911704 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jncr\" (UniqueName: \"kubernetes.io/projected/38de9225-517b-45c9-8f18-efad998ca841-kube-api-access-2jncr\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.911736 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-inventory\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.916150 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-inventory\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.916183 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.916248 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ceph\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:51 crc kubenswrapper[4940]: I1126 09:18:51.931664 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jncr\" (UniqueName: \"kubernetes.io/projected/38de9225-517b-45c9-8f18-efad998ca841-kube-api-access-2jncr\") pod \"reboot-os-openstack-openstack-cell1-v4djb\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:52 crc kubenswrapper[4940]: I1126 09:18:52.070944 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:18:52 crc kubenswrapper[4940]: I1126 09:18:52.730201 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-v4djb"] Nov 26 09:18:53 crc kubenswrapper[4940]: I1126 09:18:53.692686 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" event={"ID":"38de9225-517b-45c9-8f18-efad998ca841","Type":"ContainerStarted","Data":"b8ab1ef621eb21ed7989db0c6854ab770b87f7ae40af9878db6d671ce7d82e77"} Nov 26 09:18:53 crc kubenswrapper[4940]: I1126 09:18:53.693016 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" event={"ID":"38de9225-517b-45c9-8f18-efad998ca841","Type":"ContainerStarted","Data":"68f4a3b181cb34faa194c41a2a30c4746bd9f593f2e756e9d17f95ed8c52a803"} Nov 26 09:18:53 crc kubenswrapper[4940]: I1126 09:18:53.711190 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" podStartSLOduration=2.256652338 podStartE2EDuration="2.711170897s" podCreationTimestamp="2025-11-26 09:18:51 +0000 UTC" firstStartedPulling="2025-11-26 09:18:52.73361392 +0000 UTC m=+8634.253755539" lastFinishedPulling="2025-11-26 09:18:53.188132469 +0000 UTC m=+8634.708274098" observedRunningTime="2025-11-26 09:18:53.707929854 +0000 UTC m=+8635.228071473" watchObservedRunningTime="2025-11-26 09:18:53.711170897 +0000 UTC m=+8635.231312526" Nov 26 09:18:55 crc kubenswrapper[4940]: I1126 09:18:55.717859 4940 generic.go:334] "Generic (PLEG): container finished" podID="2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" containerID="174abc17495da4bb492bc68863fe8e6cded179ebdddbffd4588383e51ed65635" exitCode=0 Nov 26 09:18:55 crc kubenswrapper[4940]: I1126 09:18:55.717924 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" event={"ID":"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c","Type":"ContainerDied","Data":"174abc17495da4bb492bc68863fe8e6cded179ebdddbffd4588383e51ed65635"} Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.300416 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.340772 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-inventory\") pod \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.341023 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ovn-combined-ca-bundle\") pod \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.341106 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-neutron-metadata-combined-ca-bundle\") pod \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.341138 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ssh-key\") pod \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.341155 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-bootstrap-combined-ca-bundle\") pod \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.341174 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mlrh\" (UniqueName: \"kubernetes.io/projected/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-kube-api-access-8mlrh\") pod \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\" (UID: \"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c\") " Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.350089 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" (UID: "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.350101 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-kube-api-access-8mlrh" (OuterVolumeSpecName: "kube-api-access-8mlrh") pod "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" (UID: "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c"). InnerVolumeSpecName "kube-api-access-8mlrh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.363359 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" (UID: "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.363653 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" (UID: "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.375651 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" (UID: "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.382195 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-inventory" (OuterVolumeSpecName: "inventory") pod "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" (UID: "2e0c436e-dfcf-4eb0-92b6-1e210a026d8c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.443073 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.443106 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.443117 4940 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.443129 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mlrh\" (UniqueName: \"kubernetes.io/projected/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-kube-api-access-8mlrh\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.443139 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.443148 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0c436e-dfcf-4eb0-92b6-1e210a026d8c-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.742534 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" event={"ID":"2e0c436e-dfcf-4eb0-92b6-1e210a026d8c","Type":"ContainerDied","Data":"eba255894030fc5d7fbc9f50296f4a396d32287e9f3a3c87d646e62e5939b626"} Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.742571 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-networker-sz9vh" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.742581 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eba255894030fc5d7fbc9f50296f4a396d32287e9f3a3c87d646e62e5939b626" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.818737 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-networker-khrbz"] Nov 26 09:18:57 crc kubenswrapper[4940]: E1126 09:18:57.819199 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" containerName="install-certs-openstack-openstack-networker" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.819215 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" containerName="install-certs-openstack-openstack-networker" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.819448 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e0c436e-dfcf-4eb0-92b6-1e210a026d8c" containerName="install-certs-openstack-openstack-networker" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.820280 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.822860 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.823147 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.829598 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.836950 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-networker-khrbz"] Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.953351 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.953415 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovncontroller-config-0\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.953449 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq7hp\" (UniqueName: \"kubernetes.io/projected/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-kube-api-access-xq7hp\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.953563 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ssh-key\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:57 crc kubenswrapper[4940]: I1126 09:18:57.953633 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-inventory\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.055105 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq7hp\" (UniqueName: \"kubernetes.io/projected/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-kube-api-access-xq7hp\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.055463 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ssh-key\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.055607 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-inventory\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.055753 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.055908 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovncontroller-config-0\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.056894 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovncontroller-config-0\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.063734 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.063739 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ssh-key\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.063988 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-inventory\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.075102 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq7hp\" (UniqueName: \"kubernetes.io/projected/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-kube-api-access-xq7hp\") pod \"ovn-openstack-openstack-networker-khrbz\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 
09:18:58.138156 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.699622 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-networker-khrbz"] Nov 26 09:18:58 crc kubenswrapper[4940]: I1126 09:18:58.755114 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-networker-khrbz" event={"ID":"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a","Type":"ContainerStarted","Data":"1e84dffab34554a25281067dda247f427a9c3475ce081197fd3a647b99fed4ee"} Nov 26 09:18:59 crc kubenswrapper[4940]: I1126 09:18:59.767433 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-networker-khrbz" event={"ID":"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a","Type":"ContainerStarted","Data":"3670c9710f8187e1510bfea1a3c6f398ca6e706abbf97b422f0c7f6a7c17beae"} Nov 26 09:18:59 crc kubenswrapper[4940]: I1126 09:18:59.793822 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-openstack-openstack-networker-khrbz" podStartSLOduration=2.374603803 podStartE2EDuration="2.79380117s" podCreationTimestamp="2025-11-26 09:18:57 +0000 UTC" firstStartedPulling="2025-11-26 09:18:58.709085316 +0000 UTC m=+8640.229226925" lastFinishedPulling="2025-11-26 09:18:59.128282633 +0000 UTC m=+8640.648424292" observedRunningTime="2025-11-26 09:18:59.793716407 +0000 UTC m=+8641.313858026" watchObservedRunningTime="2025-11-26 09:18:59.79380117 +0000 UTC m=+8641.313942799" Nov 26 09:19:08 crc kubenswrapper[4940]: I1126 09:19:08.372178 4940 generic.go:334] "Generic (PLEG): container finished" podID="38de9225-517b-45c9-8f18-efad998ca841" containerID="b8ab1ef621eb21ed7989db0c6854ab770b87f7ae40af9878db6d671ce7d82e77" exitCode=0 Nov 26 09:19:08 crc kubenswrapper[4940]: I1126 09:19:08.372277 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" event={"ID":"38de9225-517b-45c9-8f18-efad998ca841","Type":"ContainerDied","Data":"b8ab1ef621eb21ed7989db0c6854ab770b87f7ae40af9878db6d671ce7d82e77"} Nov 26 09:19:09 crc kubenswrapper[4940]: I1126 09:19:09.854472 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.010849 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jncr\" (UniqueName: \"kubernetes.io/projected/38de9225-517b-45c9-8f18-efad998ca841-kube-api-access-2jncr\") pod \"38de9225-517b-45c9-8f18-efad998ca841\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.010921 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ssh-key\") pod \"38de9225-517b-45c9-8f18-efad998ca841\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.010969 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-inventory\") pod \"38de9225-517b-45c9-8f18-efad998ca841\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.011002 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ceph\") pod \"38de9225-517b-45c9-8f18-efad998ca841\" (UID: \"38de9225-517b-45c9-8f18-efad998ca841\") " Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.017024 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38de9225-517b-45c9-8f18-efad998ca841-kube-api-access-2jncr" (OuterVolumeSpecName: "kube-api-access-2jncr") pod "38de9225-517b-45c9-8f18-efad998ca841" (UID: "38de9225-517b-45c9-8f18-efad998ca841"). InnerVolumeSpecName "kube-api-access-2jncr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.017128 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ceph" (OuterVolumeSpecName: "ceph") pod "38de9225-517b-45c9-8f18-efad998ca841" (UID: "38de9225-517b-45c9-8f18-efad998ca841"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.044243 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-inventory" (OuterVolumeSpecName: "inventory") pod "38de9225-517b-45c9-8f18-efad998ca841" (UID: "38de9225-517b-45c9-8f18-efad998ca841"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.056898 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "38de9225-517b-45c9-8f18-efad998ca841" (UID: "38de9225-517b-45c9-8f18-efad998ca841"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.113909 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jncr\" (UniqueName: \"kubernetes.io/projected/38de9225-517b-45c9-8f18-efad998ca841-kube-api-access-2jncr\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.113951 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.113964 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.113976 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38de9225-517b-45c9-8f18-efad998ca841-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.399301 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" event={"ID":"38de9225-517b-45c9-8f18-efad998ca841","Type":"ContainerDied","Data":"68f4a3b181cb34faa194c41a2a30c4746bd9f593f2e756e9d17f95ed8c52a803"} Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.399556 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68f4a3b181cb34faa194c41a2a30c4746bd9f593f2e756e9d17f95ed8c52a803" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.399424 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-v4djb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.481157 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-cr4hb"] Nov 26 09:19:10 crc kubenswrapper[4940]: E1126 09:19:10.481610 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38de9225-517b-45c9-8f18-efad998ca841" containerName="reboot-os-openstack-openstack-cell1" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.481627 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="38de9225-517b-45c9-8f18-efad998ca841" containerName="reboot-os-openstack-openstack-cell1" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.481857 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="38de9225-517b-45c9-8f18-efad998ca841" containerName="reboot-os-openstack-openstack-cell1" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.482642 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.484991 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.485553 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.492607 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-cr4hb"] Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.626865 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ssh-key\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.626904 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ceph\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627060 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627254 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627321 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627497 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627585 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-inventory\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627750 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627782 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627870 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627938 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.627993 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzj8r\" (UniqueName: \"kubernetes.io/projected/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-kube-api-access-lzj8r\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730168 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730285 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730352 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-lzj8r\" (UniqueName: \"kubernetes.io/projected/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-kube-api-access-lzj8r\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730403 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ssh-key\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730444 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ceph\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730496 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730574 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730616 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730712 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730779 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-inventory\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730883 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.730940 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.735004 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.735079 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.738494 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.738836 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.738845 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.739174 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.740095 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ssh-key\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.740429 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.740666 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ceph\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.751757 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-inventory\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.752136 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.766315 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzj8r\" (UniqueName: \"kubernetes.io/projected/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-kube-api-access-lzj8r\") pod \"install-certs-openstack-openstack-cell1-cr4hb\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:10 crc kubenswrapper[4940]: I1126 09:19:10.807951 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:11 crc kubenswrapper[4940]: I1126 09:19:11.376073 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-cr4hb"] Nov 26 09:19:11 crc kubenswrapper[4940]: W1126 09:19:11.388203 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bdf34ec_75e0_4692_ace7_b208f4ddeaec.slice/crio-2f753510d8175dc1b3c0dad3a090704902e2a8fbfde5a282152c9b1753c261db WatchSource:0}: Error finding container 2f753510d8175dc1b3c0dad3a090704902e2a8fbfde5a282152c9b1753c261db: Status 404 returned error can't find the container with id 2f753510d8175dc1b3c0dad3a090704902e2a8fbfde5a282152c9b1753c261db Nov 26 09:19:11 crc kubenswrapper[4940]: I1126 09:19:11.411887 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" event={"ID":"0bdf34ec-75e0-4692-ace7-b208f4ddeaec","Type":"ContainerStarted","Data":"2f753510d8175dc1b3c0dad3a090704902e2a8fbfde5a282152c9b1753c261db"} Nov 26 09:19:13 crc kubenswrapper[4940]: I1126 09:19:13.447330 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" event={"ID":"0bdf34ec-75e0-4692-ace7-b208f4ddeaec","Type":"ContainerStarted","Data":"814b93eaed8186037ead3e4a22a5fad00242e053833a431ffb1d10915eec8e89"} Nov 26 09:19:13 crc kubenswrapper[4940]: I1126 09:19:13.474028 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" podStartSLOduration=2.146844505 podStartE2EDuration="3.474011777s" podCreationTimestamp="2025-11-26 09:19:10 +0000 UTC" firstStartedPulling="2025-11-26 09:19:11.391246554 +0000 UTC m=+8652.911388173" lastFinishedPulling="2025-11-26 09:19:12.718413786 +0000 UTC m=+8654.238555445" observedRunningTime="2025-11-26 09:19:13.471471537 +0000 UTC m=+8654.991613166" watchObservedRunningTime="2025-11-26 09:19:13.474011777 +0000 UTC m=+8654.994153396" Nov 26 09:19:32 crc kubenswrapper[4940]: I1126 09:19:32.659646 4940 generic.go:334] "Generic (PLEG): container finished" podID="0bdf34ec-75e0-4692-ace7-b208f4ddeaec" containerID="814b93eaed8186037ead3e4a22a5fad00242e053833a431ffb1d10915eec8e89" exitCode=0 Nov 26 09:19:32 crc kubenswrapper[4940]: I1126 09:19:32.659717 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" event={"ID":"0bdf34ec-75e0-4692-ace7-b208f4ddeaec","Type":"ContainerDied","Data":"814b93eaed8186037ead3e4a22a5fad00242e053833a431ffb1d10915eec8e89"} Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.185611 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.343768 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-metadata-combined-ca-bundle\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.343824 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-inventory\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.343867 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-bootstrap-combined-ca-bundle\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.343904 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzj8r\" (UniqueName: \"kubernetes.io/projected/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-kube-api-access-lzj8r\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.343956 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-libvirt-combined-ca-bundle\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.344094 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-sriov-combined-ca-bundle\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.344124 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ssh-key\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.345035 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ceph\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.345088 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-dhcp-combined-ca-bundle\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.345177 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-nova-combined-ca-bundle\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.345204 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ovn-combined-ca-bundle\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.345279 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-telemetry-combined-ca-bundle\") pod \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\" (UID: \"0bdf34ec-75e0-4692-ace7-b208f4ddeaec\") " Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.349745 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.350229 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.350944 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.351306 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.354459 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-kube-api-access-lzj8r" (OuterVolumeSpecName: "kube-api-access-lzj8r") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "kube-api-access-lzj8r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.354895 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.354923 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.355488 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ceph" (OuterVolumeSpecName: "ceph") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.356164 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.356275 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.378587 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.379207 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-inventory" (OuterVolumeSpecName: "inventory") pod "0bdf34ec-75e0-4692-ace7-b208f4ddeaec" (UID: "0bdf34ec-75e0-4692-ace7-b208f4ddeaec"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447635 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447674 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447687 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447695 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447710 4940 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447721 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447730 4940 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447740 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447748 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447758 4940 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447769 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzj8r\" (UniqueName: \"kubernetes.io/projected/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-kube-api-access-lzj8r\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.447779 4940 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bdf34ec-75e0-4692-ace7-b208f4ddeaec-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.682688 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" event={"ID":"0bdf34ec-75e0-4692-ace7-b208f4ddeaec","Type":"ContainerDied","Data":"2f753510d8175dc1b3c0dad3a090704902e2a8fbfde5a282152c9b1753c261db"} Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.682754 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f753510d8175dc1b3c0dad3a090704902e2a8fbfde5a282152c9b1753c261db" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.683157 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-cr4hb" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.804611 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-zrsks"] Nov 26 09:19:34 crc kubenswrapper[4940]: E1126 09:19:34.805526 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bdf34ec-75e0-4692-ace7-b208f4ddeaec" containerName="install-certs-openstack-openstack-cell1" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.805544 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bdf34ec-75e0-4692-ace7-b208f4ddeaec" containerName="install-certs-openstack-openstack-cell1" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.805723 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bdf34ec-75e0-4692-ace7-b208f4ddeaec" containerName="install-certs-openstack-openstack-cell1" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.806489 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.812438 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.812476 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.847946 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-zrsks"] Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.955918 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ceph\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.956073 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-inventory\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 09:19:34.956333 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mr5xt\" (UniqueName: \"kubernetes.io/projected/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-kube-api-access-mr5xt\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:34 crc kubenswrapper[4940]: I1126 
09:19:34.956543 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.058136 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-inventory\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.058247 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mr5xt\" (UniqueName: \"kubernetes.io/projected/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-kube-api-access-mr5xt\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.058291 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.058377 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ceph\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.062688 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ceph\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.063105 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.064610 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-inventory\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.087710 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mr5xt\" (UniqueName: \"kubernetes.io/projected/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-kube-api-access-mr5xt\") pod \"ceph-client-openstack-openstack-cell1-zrsks\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") 
" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.148967 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.675747 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-zrsks"] Nov 26 09:19:35 crc kubenswrapper[4940]: I1126 09:19:35.695508 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" event={"ID":"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725","Type":"ContainerStarted","Data":"5af5b82e183d9945e17e343eccad16356f00e1a6edf4d659532cd73e1360e0bf"} Nov 26 09:19:36 crc kubenswrapper[4940]: I1126 09:19:36.705701 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" event={"ID":"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725","Type":"ContainerStarted","Data":"4fc4d6344e0ec3379c346671ea64b75a690c8e2be52687c061bbccde62d14821"} Nov 26 09:19:36 crc kubenswrapper[4940]: I1126 09:19:36.726293 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" podStartSLOduration=2.157700392 podStartE2EDuration="2.726271188s" podCreationTimestamp="2025-11-26 09:19:34 +0000 UTC" firstStartedPulling="2025-11-26 09:19:35.678944162 +0000 UTC m=+8677.199085781" lastFinishedPulling="2025-11-26 09:19:36.247514948 +0000 UTC m=+8677.767656577" observedRunningTime="2025-11-26 09:19:36.720400201 +0000 UTC m=+8678.240541840" watchObservedRunningTime="2025-11-26 09:19:36.726271188 +0000 UTC m=+8678.246412807" Nov 26 09:19:41 crc kubenswrapper[4940]: I1126 09:19:41.778532 4940 generic.go:334] "Generic (PLEG): container finished" podID="07af1a08-7b4e-4c4b-b2d5-6f716d5ae725" containerID="4fc4d6344e0ec3379c346671ea64b75a690c8e2be52687c061bbccde62d14821" exitCode=0 Nov 26 09:19:41 crc kubenswrapper[4940]: I1126 09:19:41.778614 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" event={"ID":"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725","Type":"ContainerDied","Data":"4fc4d6344e0ec3379c346671ea64b75a690c8e2be52687c061bbccde62d14821"} Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.278870 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.347773 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ssh-key\") pod \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.347866 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ceph\") pod \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.348018 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mr5xt\" (UniqueName: \"kubernetes.io/projected/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-kube-api-access-mr5xt\") pod \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.348143 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-inventory\") pod \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\" (UID: \"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725\") " Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.356301 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ceph" (OuterVolumeSpecName: "ceph") pod "07af1a08-7b4e-4c4b-b2d5-6f716d5ae725" (UID: "07af1a08-7b4e-4c4b-b2d5-6f716d5ae725"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.363296 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-kube-api-access-mr5xt" (OuterVolumeSpecName: "kube-api-access-mr5xt") pod "07af1a08-7b4e-4c4b-b2d5-6f716d5ae725" (UID: "07af1a08-7b4e-4c4b-b2d5-6f716d5ae725"). InnerVolumeSpecName "kube-api-access-mr5xt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.380690 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "07af1a08-7b4e-4c4b-b2d5-6f716d5ae725" (UID: "07af1a08-7b4e-4c4b-b2d5-6f716d5ae725"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.403236 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-inventory" (OuterVolumeSpecName: "inventory") pod "07af1a08-7b4e-4c4b-b2d5-6f716d5ae725" (UID: "07af1a08-7b4e-4c4b-b2d5-6f716d5ae725"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.452219 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mr5xt\" (UniqueName: \"kubernetes.io/projected/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-kube-api-access-mr5xt\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.452631 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.452754 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.452825 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07af1a08-7b4e-4c4b-b2d5-6f716d5ae725-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.799774 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" event={"ID":"07af1a08-7b4e-4c4b-b2d5-6f716d5ae725","Type":"ContainerDied","Data":"5af5b82e183d9945e17e343eccad16356f00e1a6edf4d659532cd73e1360e0bf"} Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.799837 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5af5b82e183d9945e17e343eccad16356f00e1a6edf4d659532cd73e1360e0bf" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.799840 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-zrsks" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.906411 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-22fh4"] Nov 26 09:19:43 crc kubenswrapper[4940]: E1126 09:19:43.907262 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07af1a08-7b4e-4c4b-b2d5-6f716d5ae725" containerName="ceph-client-openstack-openstack-cell1" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.907282 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="07af1a08-7b4e-4c4b-b2d5-6f716d5ae725" containerName="ceph-client-openstack-openstack-cell1" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.907684 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="07af1a08-7b4e-4c4b-b2d5-6f716d5ae725" containerName="ceph-client-openstack-openstack-cell1" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.908566 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.914958 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.915479 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:19:43 crc kubenswrapper[4940]: I1126 09:19:43.924536 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-22fh4"] Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.064729 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.064907 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jmzk\" (UniqueName: \"kubernetes.io/projected/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-kube-api-access-9jmzk\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.064962 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.065194 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ssh-key\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.065315 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ceph\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.065353 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-inventory\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.166873 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jmzk\" (UniqueName: \"kubernetes.io/projected/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-kube-api-access-9jmzk\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 
09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.166921 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.166981 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ssh-key\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.167028 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ceph\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.167070 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-inventory\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.167143 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.168719 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.172635 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-inventory\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.172758 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ceph\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.173096 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " 
pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.173693 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ssh-key\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.188267 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jmzk\" (UniqueName: \"kubernetes.io/projected/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-kube-api-access-9jmzk\") pod \"ovn-openstack-openstack-cell1-22fh4\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.240413 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:19:44 crc kubenswrapper[4940]: I1126 09:19:44.831926 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-22fh4"] Nov 26 09:19:45 crc kubenswrapper[4940]: I1126 09:19:45.846795 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-22fh4" event={"ID":"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a","Type":"ContainerStarted","Data":"555acd5f3821fd7ab603db3fc817c7ae9fa4b41a8ad6e4233215cec29ec5d926"} Nov 26 09:19:45 crc kubenswrapper[4940]: I1126 09:19:45.847243 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-22fh4" event={"ID":"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a","Type":"ContainerStarted","Data":"dc701530e4e442fb1b2470c9f2fd318e3fcf7b1508a61d2a48a4a4ae8e1f9d60"} Nov 26 09:19:45 crc kubenswrapper[4940]: I1126 09:19:45.888161 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-openstack-openstack-cell1-22fh4" podStartSLOduration=2.269149294 podStartE2EDuration="2.888141212s" podCreationTimestamp="2025-11-26 09:19:43 +0000 UTC" firstStartedPulling="2025-11-26 09:19:44.828611019 +0000 UTC m=+8686.348752638" lastFinishedPulling="2025-11-26 09:19:45.447602927 +0000 UTC m=+8686.967744556" observedRunningTime="2025-11-26 09:19:45.879606091 +0000 UTC m=+8687.399747710" watchObservedRunningTime="2025-11-26 09:19:45.888141212 +0000 UTC m=+8687.408282831" Nov 26 09:20:20 crc kubenswrapper[4940]: I1126 09:20:20.245882 4940 generic.go:334] "Generic (PLEG): container finished" podID="c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" containerID="3670c9710f8187e1510bfea1a3c6f398ca6e706abbf97b422f0c7f6a7c17beae" exitCode=0 Nov 26 09:20:20 crc kubenswrapper[4940]: I1126 09:20:20.245970 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-networker-khrbz" event={"ID":"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a","Type":"ContainerDied","Data":"3670c9710f8187e1510bfea1a3c6f398ca6e706abbf97b422f0c7f6a7c17beae"} Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.041138 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.164152 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovncontroller-config-0\") pod \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.164539 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovn-combined-ca-bundle\") pod \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.164735 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-inventory\") pod \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.164948 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ssh-key\") pod \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.165101 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq7hp\" (UniqueName: \"kubernetes.io/projected/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-kube-api-access-xq7hp\") pod \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\" (UID: \"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a\") " Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.171367 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" (UID: "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.179630 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-kube-api-access-xq7hp" (OuterVolumeSpecName: "kube-api-access-xq7hp") pod "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" (UID: "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a"). InnerVolumeSpecName "kube-api-access-xq7hp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.195854 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" (UID: "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.196741 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" (UID: "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.197011 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-inventory" (OuterVolumeSpecName: "inventory") pod "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" (UID: "c4653b1a-ca1d-4cdd-8279-fb05af4ee21a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.268575 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-networker-khrbz" event={"ID":"c4653b1a-ca1d-4cdd-8279-fb05af4ee21a","Type":"ContainerDied","Data":"1e84dffab34554a25281067dda247f427a9c3475ce081197fd3a647b99fed4ee"} Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.268971 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e84dffab34554a25281067dda247f427a9c3475ce081197fd3a647b99fed4ee" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.268653 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-networker-khrbz" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.269684 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.270848 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq7hp\" (UniqueName: \"kubernetes.io/projected/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-kube-api-access-xq7hp\") on node \"crc\" DevicePath \"\"" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.270876 4940 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.270930 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.270940 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4653b1a-ca1d-4cdd-8279-fb05af4ee21a-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.367120 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-networker-lc79g"] Nov 26 09:20:22 crc kubenswrapper[4940]: E1126 09:20:22.367647 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" containerName="ovn-openstack-openstack-networker" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.367665 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" 
containerName="ovn-openstack-openstack-networker" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.367897 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4653b1a-ca1d-4cdd-8279-fb05af4ee21a" containerName="ovn-openstack-openstack-networker" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.368698 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.372381 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-networker" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.372510 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.372537 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.373872 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-networker-dockercfg-n7fgn" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.395832 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-networker-lc79g"] Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.475561 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-ssh-key\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.475642 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.475729 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.475782 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-inventory\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.475999 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.476235 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mprtt\" (UniqueName: \"kubernetes.io/projected/420f7d01-ed55-46a9-970a-fbd4beff5c75-kube-api-access-mprtt\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.578432 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.578515 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-inventory\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.578593 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.578646 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mprtt\" (UniqueName: \"kubernetes.io/projected/420f7d01-ed55-46a9-970a-fbd4beff5c75-kube-api-access-mprtt\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.578714 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-ssh-key\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.578759 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.583184 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.585123 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.585717 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.586761 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-ssh-key\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.594152 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-inventory\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.605539 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mprtt\" (UniqueName: \"kubernetes.io/projected/420f7d01-ed55-46a9-970a-fbd4beff5c75-kube-api-access-mprtt\") pod \"neutron-metadata-openstack-openstack-networker-lc79g\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:22 crc kubenswrapper[4940]: I1126 09:20:22.698895 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:20:23 crc kubenswrapper[4940]: I1126 09:20:23.291020 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-networker-lc79g"] Nov 26 09:20:23 crc kubenswrapper[4940]: W1126 09:20:23.894232 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod420f7d01_ed55_46a9_970a_fbd4beff5c75.slice/crio-dbce6784501aab2bbf4a27bfaa192a4e87e43261640e63ad7b761f25896facb4 WatchSource:0}: Error finding container dbce6784501aab2bbf4a27bfaa192a4e87e43261640e63ad7b761f25896facb4: Status 404 returned error can't find the container with id dbce6784501aab2bbf4a27bfaa192a4e87e43261640e63ad7b761f25896facb4 Nov 26 09:20:23 crc kubenswrapper[4940]: I1126 09:20:23.899881 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:20:24 crc kubenswrapper[4940]: I1126 09:20:24.290782 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" event={"ID":"420f7d01-ed55-46a9-970a-fbd4beff5c75","Type":"ContainerStarted","Data":"dbce6784501aab2bbf4a27bfaa192a4e87e43261640e63ad7b761f25896facb4"} Nov 26 09:20:25 crc kubenswrapper[4940]: I1126 09:20:25.301809 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" event={"ID":"420f7d01-ed55-46a9-970a-fbd4beff5c75","Type":"ContainerStarted","Data":"e7ec8c54876d39b2fa4b97eea5fed30d6db9fcbc0515f9189455d5d54c5bb72f"} Nov 26 09:20:25 crc kubenswrapper[4940]: I1126 09:20:25.327551 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" podStartSLOduration=2.803567888 podStartE2EDuration="3.327517928s" podCreationTimestamp="2025-11-26 09:20:22 +0000 UTC" firstStartedPulling="2025-11-26 09:20:23.899540467 +0000 UTC m=+8725.419682086" lastFinishedPulling="2025-11-26 09:20:24.423490507 +0000 UTC m=+8725.943632126" observedRunningTime="2025-11-26 09:20:25.324033708 +0000 UTC m=+8726.844175347" watchObservedRunningTime="2025-11-26 09:20:25.327517928 +0000 UTC m=+8726.847659547" Nov 26 09:20:46 crc kubenswrapper[4940]: I1126 09:20:46.976738 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nphg8"] Nov 26 09:20:46 crc kubenswrapper[4940]: I1126 09:20:46.979479 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:46 crc kubenswrapper[4940]: I1126 09:20:46.996383 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nphg8"] Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.114569 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k9pr\" (UniqueName: \"kubernetes.io/projected/b85f10fa-4a75-4529-8d11-912daacbf115-kube-api-access-6k9pr\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.115116 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-utilities\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.115203 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-catalog-content\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.216945 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-utilities\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.217146 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-catalog-content\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.217235 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k9pr\" (UniqueName: \"kubernetes.io/projected/b85f10fa-4a75-4529-8d11-912daacbf115-kube-api-access-6k9pr\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.217443 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-utilities\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.217631 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-catalog-content\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.237872 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6k9pr\" (UniqueName: \"kubernetes.io/projected/b85f10fa-4a75-4529-8d11-912daacbf115-kube-api-access-6k9pr\") pod \"community-operators-nphg8\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.316685 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:47 crc kubenswrapper[4940]: I1126 09:20:47.847545 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nphg8"] Nov 26 09:20:48 crc kubenswrapper[4940]: I1126 09:20:48.552563 4940 generic.go:334] "Generic (PLEG): container finished" podID="b85f10fa-4a75-4529-8d11-912daacbf115" containerID="84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69" exitCode=0 Nov 26 09:20:48 crc kubenswrapper[4940]: I1126 09:20:48.552611 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nphg8" event={"ID":"b85f10fa-4a75-4529-8d11-912daacbf115","Type":"ContainerDied","Data":"84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69"} Nov 26 09:20:48 crc kubenswrapper[4940]: I1126 09:20:48.552870 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nphg8" event={"ID":"b85f10fa-4a75-4529-8d11-912daacbf115","Type":"ContainerStarted","Data":"50e10e13968291805417fa7fdaf224dd6aee0c77019c0b61feca81314bcb10b4"} Nov 26 09:20:50 crc kubenswrapper[4940]: I1126 09:20:50.575550 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nphg8" event={"ID":"b85f10fa-4a75-4529-8d11-912daacbf115","Type":"ContainerStarted","Data":"ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa"} Nov 26 09:20:51 crc kubenswrapper[4940]: I1126 09:20:51.590649 4940 generic.go:334] "Generic (PLEG): container finished" podID="b85f10fa-4a75-4529-8d11-912daacbf115" containerID="ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa" exitCode=0 Nov 26 09:20:51 crc kubenswrapper[4940]: I1126 09:20:51.590729 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nphg8" event={"ID":"b85f10fa-4a75-4529-8d11-912daacbf115","Type":"ContainerDied","Data":"ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa"} Nov 26 09:20:51 crc kubenswrapper[4940]: I1126 09:20:51.728315 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:20:51 crc kubenswrapper[4940]: I1126 09:20:51.728385 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:20:52 crc kubenswrapper[4940]: I1126 09:20:52.604731 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nphg8" event={"ID":"b85f10fa-4a75-4529-8d11-912daacbf115","Type":"ContainerStarted","Data":"886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59"} Nov 26 
09:20:52 crc kubenswrapper[4940]: I1126 09:20:52.629997 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nphg8" podStartSLOduration=3.022541099 podStartE2EDuration="6.629973024s" podCreationTimestamp="2025-11-26 09:20:46 +0000 UTC" firstStartedPulling="2025-11-26 09:20:48.554567877 +0000 UTC m=+8750.074709496" lastFinishedPulling="2025-11-26 09:20:52.161999802 +0000 UTC m=+8753.682141421" observedRunningTime="2025-11-26 09:20:52.628467686 +0000 UTC m=+8754.148609305" watchObservedRunningTime="2025-11-26 09:20:52.629973024 +0000 UTC m=+8754.150114643" Nov 26 09:20:57 crc kubenswrapper[4940]: I1126 09:20:57.317810 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:57 crc kubenswrapper[4940]: I1126 09:20:57.320281 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:20:57 crc kubenswrapper[4940]: E1126 09:20:57.343376 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9675a4b8_6cd8_4b62_a28d_73542c5d1b2a.slice/crio-conmon-555acd5f3821fd7ab603db3fc817c7ae9fa4b41a8ad6e4233215cec29ec5d926.scope\": RecentStats: unable to find data in memory cache]" Nov 26 09:20:57 crc kubenswrapper[4940]: I1126 09:20:57.661609 4940 generic.go:334] "Generic (PLEG): container finished" podID="9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" containerID="555acd5f3821fd7ab603db3fc817c7ae9fa4b41a8ad6e4233215cec29ec5d926" exitCode=0 Nov 26 09:20:57 crc kubenswrapper[4940]: I1126 09:20:57.661723 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-22fh4" event={"ID":"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a","Type":"ContainerDied","Data":"555acd5f3821fd7ab603db3fc817c7ae9fa4b41a8ad6e4233215cec29ec5d926"} Nov 26 09:20:58 crc kubenswrapper[4940]: I1126 09:20:58.395752 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nphg8" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="registry-server" probeResult="failure" output=< Nov 26 09:20:58 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 09:20:58 crc kubenswrapper[4940]: > Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.333466 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.437131 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ssh-key\") pod \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.437226 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-inventory\") pod \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.437273 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovncontroller-config-0\") pod \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.437338 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jmzk\" (UniqueName: \"kubernetes.io/projected/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-kube-api-access-9jmzk\") pod \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.437452 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ceph\") pod \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.437494 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovn-combined-ca-bundle\") pod \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\" (UID: \"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a\") " Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.443184 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" (UID: "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.447136 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-kube-api-access-9jmzk" (OuterVolumeSpecName: "kube-api-access-9jmzk") pod "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" (UID: "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a"). InnerVolumeSpecName "kube-api-access-9jmzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.447781 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ceph" (OuterVolumeSpecName: "ceph") pod "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" (UID: "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.471436 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" (UID: "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.471497 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-inventory" (OuterVolumeSpecName: "inventory") pod "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" (UID: "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.472902 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" (UID: "9675a4b8-6cd8-4b62-a28d-73542c5d1b2a"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.540264 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.540297 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.540309 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.540317 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.540328 4940 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.540339 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jmzk\" (UniqueName: \"kubernetes.io/projected/9675a4b8-6cd8-4b62-a28d-73542c5d1b2a-kube-api-access-9jmzk\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.705008 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-22fh4" event={"ID":"9675a4b8-6cd8-4b62-a28d-73542c5d1b2a","Type":"ContainerDied","Data":"dc701530e4e442fb1b2470c9f2fd318e3fcf7b1508a61d2a48a4a4ae8e1f9d60"} Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.705443 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc701530e4e442fb1b2470c9f2fd318e3fcf7b1508a61d2a48a4a4ae8e1f9d60" Nov 26 09:21:00 crc kubenswrapper[4940]: I1126 09:21:00.705098 
4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-22fh4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.485972 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-cn5r4"] Nov 26 09:21:01 crc kubenswrapper[4940]: E1126 09:21:01.486495 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" containerName="ovn-openstack-openstack-cell1" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.486510 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" containerName="ovn-openstack-openstack-cell1" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.486829 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9675a4b8-6cd8-4b62-a28d-73542c5d1b2a" containerName="ovn-openstack-openstack-cell1" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.487705 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.497609 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.497897 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.509012 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-cn5r4"] Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.563500 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8xhn\" (UniqueName: \"kubernetes.io/projected/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-kube-api-access-t8xhn\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.563554 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.563594 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.563646 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.563692 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.563853 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.563884 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.665995 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.666521 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.666625 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.666653 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.666715 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8xhn\" (UniqueName: \"kubernetes.io/projected/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-kube-api-access-t8xhn\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: 
\"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.666754 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.666824 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.672024 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.672719 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.673358 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.675760 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.676723 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.691899 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc 
kubenswrapper[4940]: I1126 09:21:01.702338 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8xhn\" (UniqueName: \"kubernetes.io/projected/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-kube-api-access-t8xhn\") pod \"neutron-metadata-openstack-openstack-cell1-cn5r4\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:01 crc kubenswrapper[4940]: I1126 09:21:01.819776 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:21:02 crc kubenswrapper[4940]: I1126 09:21:02.489198 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-cn5r4"] Nov 26 09:21:02 crc kubenswrapper[4940]: I1126 09:21:02.737025 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" event={"ID":"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a","Type":"ContainerStarted","Data":"152bac308f9dfc1b347032bfa5e713aaac06548baeaea160bd4600ff2003e84a"} Nov 26 09:21:03 crc kubenswrapper[4940]: I1126 09:21:03.753207 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" event={"ID":"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a","Type":"ContainerStarted","Data":"aae293eb31405e05ac052ac012d1e52de4d6824da00a2d03640ead63abb945a4"} Nov 26 09:21:03 crc kubenswrapper[4940]: I1126 09:21:03.788847 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" podStartSLOduration=2.2192452 podStartE2EDuration="2.788824349s" podCreationTimestamp="2025-11-26 09:21:01 +0000 UTC" firstStartedPulling="2025-11-26 09:21:02.491995794 +0000 UTC m=+8764.012137423" lastFinishedPulling="2025-11-26 09:21:03.061574953 +0000 UTC m=+8764.581716572" observedRunningTime="2025-11-26 09:21:03.775103694 +0000 UTC m=+8765.295245383" watchObservedRunningTime="2025-11-26 09:21:03.788824349 +0000 UTC m=+8765.308965988" Nov 26 09:21:07 crc kubenswrapper[4940]: I1126 09:21:07.375961 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:21:07 crc kubenswrapper[4940]: I1126 09:21:07.429724 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:21:10 crc kubenswrapper[4940]: I1126 09:21:10.805186 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nphg8"] Nov 26 09:21:10 crc kubenswrapper[4940]: I1126 09:21:10.805931 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nphg8" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="registry-server" containerID="cri-o://886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59" gracePeriod=2 Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.367707 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.496912 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-catalog-content\") pod \"b85f10fa-4a75-4529-8d11-912daacbf115\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.497010 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k9pr\" (UniqueName: \"kubernetes.io/projected/b85f10fa-4a75-4529-8d11-912daacbf115-kube-api-access-6k9pr\") pod \"b85f10fa-4a75-4529-8d11-912daacbf115\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.497070 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-utilities\") pod \"b85f10fa-4a75-4529-8d11-912daacbf115\" (UID: \"b85f10fa-4a75-4529-8d11-912daacbf115\") " Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.497892 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-utilities" (OuterVolumeSpecName: "utilities") pod "b85f10fa-4a75-4529-8d11-912daacbf115" (UID: "b85f10fa-4a75-4529-8d11-912daacbf115"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.504274 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b85f10fa-4a75-4529-8d11-912daacbf115-kube-api-access-6k9pr" (OuterVolumeSpecName: "kube-api-access-6k9pr") pod "b85f10fa-4a75-4529-8d11-912daacbf115" (UID: "b85f10fa-4a75-4529-8d11-912daacbf115"). InnerVolumeSpecName "kube-api-access-6k9pr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.566107 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b85f10fa-4a75-4529-8d11-912daacbf115" (UID: "b85f10fa-4a75-4529-8d11-912daacbf115"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.599847 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.599890 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k9pr\" (UniqueName: \"kubernetes.io/projected/b85f10fa-4a75-4529-8d11-912daacbf115-kube-api-access-6k9pr\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.599899 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b85f10fa-4a75-4529-8d11-912daacbf115-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.863818 4940 generic.go:334] "Generic (PLEG): container finished" podID="b85f10fa-4a75-4529-8d11-912daacbf115" containerID="886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59" exitCode=0 Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.863859 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nphg8" event={"ID":"b85f10fa-4a75-4529-8d11-912daacbf115","Type":"ContainerDied","Data":"886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59"} Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.863885 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nphg8" event={"ID":"b85f10fa-4a75-4529-8d11-912daacbf115","Type":"ContainerDied","Data":"50e10e13968291805417fa7fdaf224dd6aee0c77019c0b61feca81314bcb10b4"} Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.863900 4940 scope.go:117] "RemoveContainer" containerID="886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.863954 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nphg8" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.892216 4940 scope.go:117] "RemoveContainer" containerID="ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa" Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.924504 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nphg8"] Nov 26 09:21:11 crc kubenswrapper[4940]: I1126 09:21:11.939450 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nphg8"] Nov 26 09:21:12 crc kubenswrapper[4940]: I1126 09:21:12.423113 4940 scope.go:117] "RemoveContainer" containerID="84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69" Nov 26 09:21:12 crc kubenswrapper[4940]: I1126 09:21:12.472610 4940 scope.go:117] "RemoveContainer" containerID="886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59" Nov 26 09:21:12 crc kubenswrapper[4940]: E1126 09:21:12.476202 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59\": container with ID starting with 886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59 not found: ID does not exist" containerID="886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59" Nov 26 09:21:12 crc kubenswrapper[4940]: I1126 09:21:12.476261 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59"} err="failed to get container status \"886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59\": rpc error: code = NotFound desc = could not find container \"886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59\": container with ID starting with 886605d6256e708892a1297864bcb86a2a8ef1a392449e7a26be7f628f6b4e59 not found: ID does not exist" Nov 26 09:21:12 crc kubenswrapper[4940]: I1126 09:21:12.476302 4940 scope.go:117] "RemoveContainer" containerID="ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa" Nov 26 09:21:12 crc kubenswrapper[4940]: E1126 09:21:12.477363 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa\": container with ID starting with ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa not found: ID does not exist" containerID="ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa" Nov 26 09:21:12 crc kubenswrapper[4940]: I1126 09:21:12.477419 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa"} err="failed to get container status \"ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa\": rpc error: code = NotFound desc = could not find container \"ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa\": container with ID starting with ea85dc47df6e94a801a4b1592e8d0c9b09a28a435166a8909a67cf0e6fef3cfa not found: ID does not exist" Nov 26 09:21:12 crc kubenswrapper[4940]: I1126 09:21:12.477447 4940 scope.go:117] "RemoveContainer" containerID="84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69" Nov 26 09:21:12 crc kubenswrapper[4940]: E1126 09:21:12.477757 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69\": container with ID starting with 84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69 not found: ID does not exist" containerID="84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69" Nov 26 09:21:12 crc kubenswrapper[4940]: I1126 09:21:12.477800 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69"} err="failed to get container status \"84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69\": rpc error: code = NotFound desc = could not find container \"84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69\": container with ID starting with 84f4272e2305e3d0b2a42c9ab6c5cea6cd5589342123e8d3b0f245262d5d6c69 not found: ID does not exist" Nov 26 09:21:13 crc kubenswrapper[4940]: I1126 09:21:13.185656 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" path="/var/lib/kubelet/pods/b85f10fa-4a75-4529-8d11-912daacbf115/volumes" Nov 26 09:21:21 crc kubenswrapper[4940]: I1126 09:21:21.728001 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:21:21 crc kubenswrapper[4940]: I1126 09:21:21.728654 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:21:31 crc kubenswrapper[4940]: I1126 09:21:31.108999 4940 generic.go:334] "Generic (PLEG): container finished" podID="420f7d01-ed55-46a9-970a-fbd4beff5c75" containerID="e7ec8c54876d39b2fa4b97eea5fed30d6db9fcbc0515f9189455d5d54c5bb72f" exitCode=0 Nov 26 09:21:31 crc kubenswrapper[4940]: I1126 09:21:31.109076 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" event={"ID":"420f7d01-ed55-46a9-970a-fbd4beff5c75","Type":"ContainerDied","Data":"e7ec8c54876d39b2fa4b97eea5fed30d6db9fcbc0515f9189455d5d54c5bb72f"} Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.651678 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.743844 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-metadata-combined-ca-bundle\") pod \"420f7d01-ed55-46a9-970a-fbd4beff5c75\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.743988 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mprtt\" (UniqueName: \"kubernetes.io/projected/420f7d01-ed55-46a9-970a-fbd4beff5c75-kube-api-access-mprtt\") pod \"420f7d01-ed55-46a9-970a-fbd4beff5c75\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.744145 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-ovn-metadata-agent-neutron-config-0\") pod \"420f7d01-ed55-46a9-970a-fbd4beff5c75\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.744196 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-ssh-key\") pod \"420f7d01-ed55-46a9-970a-fbd4beff5c75\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.744238 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-nova-metadata-neutron-config-0\") pod \"420f7d01-ed55-46a9-970a-fbd4beff5c75\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.744291 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-inventory\") pod \"420f7d01-ed55-46a9-970a-fbd4beff5c75\" (UID: \"420f7d01-ed55-46a9-970a-fbd4beff5c75\") " Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.751356 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "420f7d01-ed55-46a9-970a-fbd4beff5c75" (UID: "420f7d01-ed55-46a9-970a-fbd4beff5c75"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.753316 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/420f7d01-ed55-46a9-970a-fbd4beff5c75-kube-api-access-mprtt" (OuterVolumeSpecName: "kube-api-access-mprtt") pod "420f7d01-ed55-46a9-970a-fbd4beff5c75" (UID: "420f7d01-ed55-46a9-970a-fbd4beff5c75"). InnerVolumeSpecName "kube-api-access-mprtt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.779779 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-inventory" (OuterVolumeSpecName: "inventory") pod "420f7d01-ed55-46a9-970a-fbd4beff5c75" (UID: "420f7d01-ed55-46a9-970a-fbd4beff5c75"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.782428 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "420f7d01-ed55-46a9-970a-fbd4beff5c75" (UID: "420f7d01-ed55-46a9-970a-fbd4beff5c75"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.788002 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "420f7d01-ed55-46a9-970a-fbd4beff5c75" (UID: "420f7d01-ed55-46a9-970a-fbd4beff5c75"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.795991 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "420f7d01-ed55-46a9-970a-fbd4beff5c75" (UID: "420f7d01-ed55-46a9-970a-fbd4beff5c75"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.846902 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.846936 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.846946 4940 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.846958 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.846970 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/420f7d01-ed55-46a9-970a-fbd4beff5c75-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:32 crc kubenswrapper[4940]: I1126 09:21:32.846980 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mprtt\" (UniqueName: \"kubernetes.io/projected/420f7d01-ed55-46a9-970a-fbd4beff5c75-kube-api-access-mprtt\") on node \"crc\" DevicePath \"\"" Nov 26 09:21:33 crc kubenswrapper[4940]: I1126 09:21:33.131467 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" event={"ID":"420f7d01-ed55-46a9-970a-fbd4beff5c75","Type":"ContainerDied","Data":"dbce6784501aab2bbf4a27bfaa192a4e87e43261640e63ad7b761f25896facb4"} Nov 26 09:21:33 crc kubenswrapper[4940]: I1126 09:21:33.131516 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbce6784501aab2bbf4a27bfaa192a4e87e43261640e63ad7b761f25896facb4" Nov 26 09:21:33 crc kubenswrapper[4940]: I1126 09:21:33.131528 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-networker-lc79g" Nov 26 09:21:51 crc kubenswrapper[4940]: I1126 09:21:51.729345 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:21:51 crc kubenswrapper[4940]: I1126 09:21:51.730402 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:21:51 crc kubenswrapper[4940]: I1126 09:21:51.730475 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 09:21:51 crc kubenswrapper[4940]: I1126 09:21:51.731932 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 09:21:51 crc kubenswrapper[4940]: I1126 09:21:51.732018 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" gracePeriod=600 Nov 26 09:21:51 crc kubenswrapper[4940]: E1126 09:21:51.860774 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:21:52 crc kubenswrapper[4940]: I1126 09:21:52.357940 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" exitCode=0 Nov 26 09:21:52 crc kubenswrapper[4940]: I1126 09:21:52.357979 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2"} Nov 26 09:21:52 crc kubenswrapper[4940]: I1126 09:21:52.358011 4940 scope.go:117] "RemoveContainer" containerID="9e9aff618bcec373fe2c25642a6939cbb5873fb50b14862a18a1ace3c3c4e20a" Nov 26 09:21:52 crc kubenswrapper[4940]: I1126 09:21:52.358628 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:21:52 crc kubenswrapper[4940]: E1126 09:21:52.358876 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.192259 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rhwwq"] Nov 26 09:21:58 crc kubenswrapper[4940]: E1126 09:21:58.193289 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="420f7d01-ed55-46a9-970a-fbd4beff5c75" containerName="neutron-metadata-openstack-openstack-networker" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.193308 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="420f7d01-ed55-46a9-970a-fbd4beff5c75" containerName="neutron-metadata-openstack-openstack-networker" Nov 26 09:21:58 crc kubenswrapper[4940]: E1126 09:21:58.193358 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="extract-content" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.193367 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="extract-content" Nov 26 09:21:58 crc kubenswrapper[4940]: E1126 09:21:58.193385 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="registry-server" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.193394 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="registry-server" Nov 26 09:21:58 crc kubenswrapper[4940]: E1126 09:21:58.193421 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="extract-utilities" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.193428 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="extract-utilities" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.193700 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="b85f10fa-4a75-4529-8d11-912daacbf115" containerName="registry-server" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.193731 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="420f7d01-ed55-46a9-970a-fbd4beff5c75" containerName="neutron-metadata-openstack-openstack-networker" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.206015 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.212552 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rhwwq"] Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.402006 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8zgm\" (UniqueName: \"kubernetes.io/projected/58e7da3a-50aa-4eef-bb16-808548f3e8ca-kube-api-access-p8zgm\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.402324 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-catalog-content\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.402664 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-utilities\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.504383 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-utilities\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.504485 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8zgm\" (UniqueName: \"kubernetes.io/projected/58e7da3a-50aa-4eef-bb16-808548f3e8ca-kube-api-access-p8zgm\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.504535 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-catalog-content\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.505289 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-catalog-content\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.505284 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-utilities\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.525959 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-p8zgm\" (UniqueName: \"kubernetes.io/projected/58e7da3a-50aa-4eef-bb16-808548f3e8ca-kube-api-access-p8zgm\") pod \"redhat-operators-rhwwq\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:58 crc kubenswrapper[4940]: I1126 09:21:58.538195 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:21:59 crc kubenswrapper[4940]: I1126 09:21:59.024270 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rhwwq"] Nov 26 09:21:59 crc kubenswrapper[4940]: I1126 09:21:59.433152 4940 generic.go:334] "Generic (PLEG): container finished" podID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerID="5709e0c2519c58984e8bef4697f841f81559387980668c4f45e886e91b276d31" exitCode=0 Nov 26 09:21:59 crc kubenswrapper[4940]: I1126 09:21:59.433471 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhwwq" event={"ID":"58e7da3a-50aa-4eef-bb16-808548f3e8ca","Type":"ContainerDied","Data":"5709e0c2519c58984e8bef4697f841f81559387980668c4f45e886e91b276d31"} Nov 26 09:21:59 crc kubenswrapper[4940]: I1126 09:21:59.433505 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhwwq" event={"ID":"58e7da3a-50aa-4eef-bb16-808548f3e8ca","Type":"ContainerStarted","Data":"8bdfa4881903f64115387180e8054054fd3391e29dc437059a69fe92802488c8"} Nov 26 09:22:00 crc kubenswrapper[4940]: I1126 09:22:00.446708 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhwwq" event={"ID":"58e7da3a-50aa-4eef-bb16-808548f3e8ca","Type":"ContainerStarted","Data":"9a0fc2d9d13c4afeedd0dbd8d24b5918ed75e42b144c18ccd7db89fb1b34e1ab"} Nov 26 09:22:03 crc kubenswrapper[4940]: I1126 09:22:03.165742 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:22:03 crc kubenswrapper[4940]: E1126 09:22:03.168025 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:22:03 crc kubenswrapper[4940]: I1126 09:22:03.506031 4940 generic.go:334] "Generic (PLEG): container finished" podID="c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" containerID="aae293eb31405e05ac052ac012d1e52de4d6824da00a2d03640ead63abb945a4" exitCode=0 Nov 26 09:22:03 crc kubenswrapper[4940]: I1126 09:22:03.506102 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" event={"ID":"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a","Type":"ContainerDied","Data":"aae293eb31405e05ac052ac012d1e52de4d6824da00a2d03640ead63abb945a4"} Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.795451 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.981463 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8xhn\" (UniqueName: \"kubernetes.io/projected/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-kube-api-access-t8xhn\") pod \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.981601 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ceph\") pod \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.981628 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-metadata-combined-ca-bundle\") pod \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.981741 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ssh-key\") pod \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.981797 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.981865 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-inventory\") pod \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.981929 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-nova-metadata-neutron-config-0\") pod \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\" (UID: \"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a\") " Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.987804 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" (UID: "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.988913 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ceph" (OuterVolumeSpecName: "ceph") pod "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" (UID: "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:22:05 crc kubenswrapper[4940]: I1126 09:22:05.993324 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-kube-api-access-t8xhn" (OuterVolumeSpecName: "kube-api-access-t8xhn") pod "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" (UID: "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a"). InnerVolumeSpecName "kube-api-access-t8xhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.017017 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-inventory" (OuterVolumeSpecName: "inventory") pod "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" (UID: "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.018938 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" (UID: "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.031962 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" (UID: "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.034270 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" (UID: "c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.086051 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.086085 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.086100 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.086112 4940 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.086122 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8xhn\" (UniqueName: \"kubernetes.io/projected/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-kube-api-access-t8xhn\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.086131 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.086139 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.545474 4940 generic.go:334] "Generic (PLEG): container finished" podID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerID="9a0fc2d9d13c4afeedd0dbd8d24b5918ed75e42b144c18ccd7db89fb1b34e1ab" exitCode=0 Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.545885 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhwwq" event={"ID":"58e7da3a-50aa-4eef-bb16-808548f3e8ca","Type":"ContainerDied","Data":"9a0fc2d9d13c4afeedd0dbd8d24b5918ed75e42b144c18ccd7db89fb1b34e1ab"} Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.549969 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" event={"ID":"c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a","Type":"ContainerDied","Data":"152bac308f9dfc1b347032bfa5e713aaac06548baeaea160bd4600ff2003e84a"} Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.550022 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="152bac308f9dfc1b347032bfa5e713aaac06548baeaea160bd4600ff2003e84a" Nov 26 09:22:06 crc kubenswrapper[4940]: I1126 09:22:06.550125 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-cn5r4" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.036789 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-6wckh"] Nov 26 09:22:07 crc kubenswrapper[4940]: E1126 09:22:07.037259 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" containerName="neutron-metadata-openstack-openstack-cell1" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.037273 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" containerName="neutron-metadata-openstack-openstack-cell1" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.037488 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a" containerName="neutron-metadata-openstack-openstack-cell1" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.038396 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.041844 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.041945 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.042205 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.042230 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.042387 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.053210 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-6wckh"] Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.112246 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcg5x\" (UniqueName: \"kubernetes.io/projected/a96978df-fadc-461a-91ec-fe51f593b61a-kube-api-access-pcg5x\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.112795 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-inventory\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.113237 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ssh-key\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.113461 4940 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.113658 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.113904 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ceph\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.215524 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.215644 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ceph\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.215701 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcg5x\" (UniqueName: \"kubernetes.io/projected/a96978df-fadc-461a-91ec-fe51f593b61a-kube-api-access-pcg5x\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.215789 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-inventory\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.215814 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ssh-key\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.215858 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: 
\"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.222524 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.222695 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-inventory\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.222574 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ssh-key\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.223259 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.223276 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ceph\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.242488 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcg5x\" (UniqueName: \"kubernetes.io/projected/a96978df-fadc-461a-91ec-fe51f593b61a-kube-api-access-pcg5x\") pod \"libvirt-openstack-openstack-cell1-6wckh\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:07 crc kubenswrapper[4940]: I1126 09:22:07.359529 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:22:08 crc kubenswrapper[4940]: I1126 09:22:08.489253 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-6wckh"] Nov 26 09:22:08 crc kubenswrapper[4940]: I1126 09:22:08.577158 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhwwq" event={"ID":"58e7da3a-50aa-4eef-bb16-808548f3e8ca","Type":"ContainerStarted","Data":"fc229676ec5aadd4a00bee9f12a0e0ca89ac9f1894c2e830a96a86c226805bca"} Nov 26 09:22:08 crc kubenswrapper[4940]: I1126 09:22:08.578553 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" event={"ID":"a96978df-fadc-461a-91ec-fe51f593b61a","Type":"ContainerStarted","Data":"4ff3b973368ad82fffc924acea5be56f37de91910616a619097f5743cb3bb445"} Nov 26 09:22:08 crc kubenswrapper[4940]: I1126 09:22:08.594163 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rhwwq" podStartSLOduration=2.897240708 podStartE2EDuration="10.594139307s" podCreationTimestamp="2025-11-26 09:21:58 +0000 UTC" firstStartedPulling="2025-11-26 09:21:59.434899397 +0000 UTC m=+8820.955041016" lastFinishedPulling="2025-11-26 09:22:07.131797986 +0000 UTC m=+8828.651939615" observedRunningTime="2025-11-26 09:22:08.592726372 +0000 UTC m=+8830.112868031" watchObservedRunningTime="2025-11-26 09:22:08.594139307 +0000 UTC m=+8830.114280936" Nov 26 09:22:09 crc kubenswrapper[4940]: E1126 09:22:09.480829 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58e7da3a_50aa_4eef_bb16_808548f3e8ca.slice/crio-5709e0c2519c58984e8bef4697f841f81559387980668c4f45e886e91b276d31.scope\": RecentStats: unable to find data in memory cache]" Nov 26 09:22:09 crc kubenswrapper[4940]: I1126 09:22:09.593060 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" event={"ID":"a96978df-fadc-461a-91ec-fe51f593b61a","Type":"ContainerStarted","Data":"1cab4da027ce234f378984cd03ec5acf193c21713f59d44c18687382803d52ac"} Nov 26 09:22:09 crc kubenswrapper[4940]: I1126 09:22:09.629249 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" podStartSLOduration=1.9707460970000001 podStartE2EDuration="2.629227729s" podCreationTimestamp="2025-11-26 09:22:07 +0000 UTC" firstStartedPulling="2025-11-26 09:22:08.496711843 +0000 UTC m=+8830.016853462" lastFinishedPulling="2025-11-26 09:22:09.155193475 +0000 UTC m=+8830.675335094" observedRunningTime="2025-11-26 09:22:09.617442325 +0000 UTC m=+8831.137583964" watchObservedRunningTime="2025-11-26 09:22:09.629227729 +0000 UTC m=+8831.149369348" Nov 26 09:22:14 crc kubenswrapper[4940]: I1126 09:22:14.166193 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:22:14 crc kubenswrapper[4940]: E1126 09:22:14.166849 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:22:18 crc kubenswrapper[4940]: I1126 09:22:18.538989 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:22:18 crc kubenswrapper[4940]: I1126 09:22:18.540272 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:22:19 crc kubenswrapper[4940]: E1126 09:22:19.802283 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58e7da3a_50aa_4eef_bb16_808548f3e8ca.slice/crio-5709e0c2519c58984e8bef4697f841f81559387980668c4f45e886e91b276d31.scope\": RecentStats: unable to find data in memory cache]" Nov 26 09:22:20 crc kubenswrapper[4940]: I1126 09:22:20.131233 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rhwwq" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="registry-server" probeResult="failure" output=< Nov 26 09:22:20 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 09:22:20 crc kubenswrapper[4940]: > Nov 26 09:22:27 crc kubenswrapper[4940]: I1126 09:22:27.166228 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:22:27 crc kubenswrapper[4940]: E1126 09:22:27.166942 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:22:28 crc kubenswrapper[4940]: I1126 09:22:28.843130 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:22:28 crc kubenswrapper[4940]: I1126 09:22:28.913554 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:22:29 crc kubenswrapper[4940]: I1126 09:22:29.398067 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rhwwq"] Nov 26 09:22:30 crc kubenswrapper[4940]: E1126 09:22:30.138544 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58e7da3a_50aa_4eef_bb16_808548f3e8ca.slice/crio-5709e0c2519c58984e8bef4697f841f81559387980668c4f45e886e91b276d31.scope\": RecentStats: unable to find data in memory cache]" Nov 26 09:22:30 crc kubenswrapper[4940]: I1126 09:22:30.822577 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rhwwq" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="registry-server" containerID="cri-o://fc229676ec5aadd4a00bee9f12a0e0ca89ac9f1894c2e830a96a86c226805bca" gracePeriod=2 Nov 26 09:22:31 crc kubenswrapper[4940]: I1126 09:22:31.850718 4940 generic.go:334] "Generic (PLEG): container finished" podID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerID="fc229676ec5aadd4a00bee9f12a0e0ca89ac9f1894c2e830a96a86c226805bca" exitCode=0 Nov 26 09:22:31 crc kubenswrapper[4940]: I1126 
09:22:31.851293 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhwwq" event={"ID":"58e7da3a-50aa-4eef-bb16-808548f3e8ca","Type":"ContainerDied","Data":"fc229676ec5aadd4a00bee9f12a0e0ca89ac9f1894c2e830a96a86c226805bca"} Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.023513 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.077948 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-utilities\") pod \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.078081 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8zgm\" (UniqueName: \"kubernetes.io/projected/58e7da3a-50aa-4eef-bb16-808548f3e8ca-kube-api-access-p8zgm\") pod \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.078230 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-catalog-content\") pod \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\" (UID: \"58e7da3a-50aa-4eef-bb16-808548f3e8ca\") " Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.078789 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-utilities" (OuterVolumeSpecName: "utilities") pod "58e7da3a-50aa-4eef-bb16-808548f3e8ca" (UID: "58e7da3a-50aa-4eef-bb16-808548f3e8ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.086143 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58e7da3a-50aa-4eef-bb16-808548f3e8ca-kube-api-access-p8zgm" (OuterVolumeSpecName: "kube-api-access-p8zgm") pod "58e7da3a-50aa-4eef-bb16-808548f3e8ca" (UID: "58e7da3a-50aa-4eef-bb16-808548f3e8ca"). InnerVolumeSpecName "kube-api-access-p8zgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.171537 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58e7da3a-50aa-4eef-bb16-808548f3e8ca" (UID: "58e7da3a-50aa-4eef-bb16-808548f3e8ca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.180814 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.180863 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58e7da3a-50aa-4eef-bb16-808548f3e8ca-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.180875 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8zgm\" (UniqueName: \"kubernetes.io/projected/58e7da3a-50aa-4eef-bb16-808548f3e8ca-kube-api-access-p8zgm\") on node \"crc\" DevicePath \"\"" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.869003 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rhwwq" event={"ID":"58e7da3a-50aa-4eef-bb16-808548f3e8ca","Type":"ContainerDied","Data":"8bdfa4881903f64115387180e8054054fd3391e29dc437059a69fe92802488c8"} Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.869207 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rhwwq" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.869303 4940 scope.go:117] "RemoveContainer" containerID="fc229676ec5aadd4a00bee9f12a0e0ca89ac9f1894c2e830a96a86c226805bca" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.900296 4940 scope.go:117] "RemoveContainer" containerID="9a0fc2d9d13c4afeedd0dbd8d24b5918ed75e42b144c18ccd7db89fb1b34e1ab" Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.927366 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rhwwq"] Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.935900 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rhwwq"] Nov 26 09:22:32 crc kubenswrapper[4940]: I1126 09:22:32.938314 4940 scope.go:117] "RemoveContainer" containerID="5709e0c2519c58984e8bef4697f841f81559387980668c4f45e886e91b276d31" Nov 26 09:22:33 crc kubenswrapper[4940]: I1126 09:22:33.206280 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" path="/var/lib/kubelet/pods/58e7da3a-50aa-4eef-bb16-808548f3e8ca/volumes" Nov 26 09:22:40 crc kubenswrapper[4940]: E1126 09:22:40.389021 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58e7da3a_50aa_4eef_bb16_808548f3e8ca.slice/crio-5709e0c2519c58984e8bef4697f841f81559387980668c4f45e886e91b276d31.scope\": RecentStats: unable to find data in memory cache]" Nov 26 09:22:41 crc kubenswrapper[4940]: I1126 09:22:41.167513 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:22:41 crc kubenswrapper[4940]: E1126 09:22:41.168346 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:22:50 crc kubenswrapper[4940]: E1126 09:22:50.730294 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58e7da3a_50aa_4eef_bb16_808548f3e8ca.slice/crio-5709e0c2519c58984e8bef4697f841f81559387980668c4f45e886e91b276d31.scope\": RecentStats: unable to find data in memory cache]" Nov 26 09:22:52 crc kubenswrapper[4940]: I1126 09:22:52.166443 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:22:52 crc kubenswrapper[4940]: E1126 09:22:52.167521 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:23:03 crc kubenswrapper[4940]: I1126 09:23:03.165803 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:23:03 crc kubenswrapper[4940]: E1126 09:23:03.166548 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:23:15 crc kubenswrapper[4940]: I1126 09:23:15.166207 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:23:15 crc kubenswrapper[4940]: E1126 09:23:15.167490 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:23:29 crc kubenswrapper[4940]: I1126 09:23:29.181686 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:23:29 crc kubenswrapper[4940]: E1126 09:23:29.182692 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:23:35 crc kubenswrapper[4940]: I1126 09:23:35.502601 4940 trace.go:236] Trace[1823097770]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-4r5p4" (26-Nov-2025 09:23:34.497) (total time: 1004ms): Nov 26 09:23:35 crc kubenswrapper[4940]: Trace[1823097770]: [1.004754289s] [1.004754289s] END Nov 26 09:23:40 crc kubenswrapper[4940]: I1126 
09:23:40.166269 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:23:40 crc kubenswrapper[4940]: E1126 09:23:40.167032 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:23:52 crc kubenswrapper[4940]: I1126 09:23:52.165787 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:23:52 crc kubenswrapper[4940]: E1126 09:23:52.166517 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:24:04 crc kubenswrapper[4940]: I1126 09:24:04.166123 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:24:04 crc kubenswrapper[4940]: E1126 09:24:04.169181 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:24:15 crc kubenswrapper[4940]: I1126 09:24:15.185611 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:24:15 crc kubenswrapper[4940]: E1126 09:24:15.186974 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:24:28 crc kubenswrapper[4940]: I1126 09:24:28.165481 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:24:28 crc kubenswrapper[4940]: E1126 09:24:28.168091 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:24:41 crc kubenswrapper[4940]: I1126 09:24:41.165689 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:24:41 crc kubenswrapper[4940]: E1126 09:24:41.166608 
4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:24:53 crc kubenswrapper[4940]: I1126 09:24:53.165603 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:24:53 crc kubenswrapper[4940]: E1126 09:24:53.166386 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:25:06 crc kubenswrapper[4940]: I1126 09:25:06.165151 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:25:06 crc kubenswrapper[4940]: E1126 09:25:06.165880 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:25:21 crc kubenswrapper[4940]: I1126 09:25:21.165822 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:25:21 crc kubenswrapper[4940]: E1126 09:25:21.166929 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:25:32 crc kubenswrapper[4940]: I1126 09:25:32.165471 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:25:32 crc kubenswrapper[4940]: E1126 09:25:32.168073 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:25:46 crc kubenswrapper[4940]: I1126 09:25:46.165883 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:25:46 crc kubenswrapper[4940]: E1126 09:25:46.167183 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:26:01 crc kubenswrapper[4940]: I1126 09:26:01.165921 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:26:01 crc kubenswrapper[4940]: E1126 09:26:01.167313 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.849172 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l9sdb"] Nov 26 09:26:07 crc kubenswrapper[4940]: E1126 09:26:07.850158 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="extract-content" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.850173 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="extract-content" Nov 26 09:26:07 crc kubenswrapper[4940]: E1126 09:26:07.850185 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="registry-server" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.850192 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="registry-server" Nov 26 09:26:07 crc kubenswrapper[4940]: E1126 09:26:07.850218 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="extract-utilities" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.850224 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="extract-utilities" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.850456 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="58e7da3a-50aa-4eef-bb16-808548f3e8ca" containerName="registry-server" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.852088 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.868083 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l9sdb"] Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.968225 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-catalog-content\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.968617 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-utilities\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:07 crc kubenswrapper[4940]: I1126 09:26:07.968671 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvqqk\" (UniqueName: \"kubernetes.io/projected/35e85b50-a973-4d3e-9e8c-d54830e907a0-kube-api-access-fvqqk\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:08 crc kubenswrapper[4940]: I1126 09:26:08.071164 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-utilities\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:08 crc kubenswrapper[4940]: I1126 09:26:08.071418 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvqqk\" (UniqueName: \"kubernetes.io/projected/35e85b50-a973-4d3e-9e8c-d54830e907a0-kube-api-access-fvqqk\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:08 crc kubenswrapper[4940]: I1126 09:26:08.071542 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-catalog-content\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:08 crc kubenswrapper[4940]: I1126 09:26:08.071811 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-utilities\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:08 crc kubenswrapper[4940]: I1126 09:26:08.071902 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-catalog-content\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:08 crc kubenswrapper[4940]: I1126 09:26:08.091809 4940 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fvqqk\" (UniqueName: \"kubernetes.io/projected/35e85b50-a973-4d3e-9e8c-d54830e907a0-kube-api-access-fvqqk\") pod \"redhat-marketplace-l9sdb\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:08 crc kubenswrapper[4940]: I1126 09:26:08.183956 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:08 crc kubenswrapper[4940]: I1126 09:26:08.670962 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l9sdb"] Nov 26 09:26:09 crc kubenswrapper[4940]: I1126 09:26:09.513068 4940 generic.go:334] "Generic (PLEG): container finished" podID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerID="574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0" exitCode=0 Nov 26 09:26:09 crc kubenswrapper[4940]: I1126 09:26:09.513145 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l9sdb" event={"ID":"35e85b50-a973-4d3e-9e8c-d54830e907a0","Type":"ContainerDied","Data":"574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0"} Nov 26 09:26:09 crc kubenswrapper[4940]: I1126 09:26:09.513469 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l9sdb" event={"ID":"35e85b50-a973-4d3e-9e8c-d54830e907a0","Type":"ContainerStarted","Data":"2d4c7c0e07ab7cf429414dd7809addb17f92b2230c49724c00c50c812ddd1084"} Nov 26 09:26:09 crc kubenswrapper[4940]: I1126 09:26:09.515119 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:26:11 crc kubenswrapper[4940]: I1126 09:26:11.535485 4940 generic.go:334] "Generic (PLEG): container finished" podID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerID="033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030" exitCode=0 Nov 26 09:26:11 crc kubenswrapper[4940]: I1126 09:26:11.535540 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l9sdb" event={"ID":"35e85b50-a973-4d3e-9e8c-d54830e907a0","Type":"ContainerDied","Data":"033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030"} Nov 26 09:26:13 crc kubenswrapper[4940]: I1126 09:26:13.166704 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:26:13 crc kubenswrapper[4940]: E1126 09:26:13.167533 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:26:13 crc kubenswrapper[4940]: I1126 09:26:13.562936 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l9sdb" event={"ID":"35e85b50-a973-4d3e-9e8c-d54830e907a0","Type":"ContainerStarted","Data":"a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b"} Nov 26 09:26:13 crc kubenswrapper[4940]: I1126 09:26:13.585611 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l9sdb" podStartSLOduration=3.730931721 podStartE2EDuration="6.585585099s" 
podCreationTimestamp="2025-11-26 09:26:07 +0000 UTC" firstStartedPulling="2025-11-26 09:26:09.514809269 +0000 UTC m=+9071.034950888" lastFinishedPulling="2025-11-26 09:26:12.369462647 +0000 UTC m=+9073.889604266" observedRunningTime="2025-11-26 09:26:13.579831287 +0000 UTC m=+9075.099972906" watchObservedRunningTime="2025-11-26 09:26:13.585585099 +0000 UTC m=+9075.105726718" Nov 26 09:26:18 crc kubenswrapper[4940]: I1126 09:26:18.185028 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:18 crc kubenswrapper[4940]: I1126 09:26:18.186923 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:18 crc kubenswrapper[4940]: I1126 09:26:18.229679 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:18 crc kubenswrapper[4940]: I1126 09:26:18.706324 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:18 crc kubenswrapper[4940]: I1126 09:26:18.778166 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l9sdb"] Nov 26 09:26:20 crc kubenswrapper[4940]: I1126 09:26:20.665962 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l9sdb" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerName="registry-server" containerID="cri-o://a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b" gracePeriod=2 Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.195404 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.240444 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvqqk\" (UniqueName: \"kubernetes.io/projected/35e85b50-a973-4d3e-9e8c-d54830e907a0-kube-api-access-fvqqk\") pod \"35e85b50-a973-4d3e-9e8c-d54830e907a0\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.240583 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-catalog-content\") pod \"35e85b50-a973-4d3e-9e8c-d54830e907a0\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.240645 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-utilities\") pod \"35e85b50-a973-4d3e-9e8c-d54830e907a0\" (UID: \"35e85b50-a973-4d3e-9e8c-d54830e907a0\") " Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.243713 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-utilities" (OuterVolumeSpecName: "utilities") pod "35e85b50-a973-4d3e-9e8c-d54830e907a0" (UID: "35e85b50-a973-4d3e-9e8c-d54830e907a0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.262308 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35e85b50-a973-4d3e-9e8c-d54830e907a0-kube-api-access-fvqqk" (OuterVolumeSpecName: "kube-api-access-fvqqk") pod "35e85b50-a973-4d3e-9e8c-d54830e907a0" (UID: "35e85b50-a973-4d3e-9e8c-d54830e907a0"). InnerVolumeSpecName "kube-api-access-fvqqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.277858 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "35e85b50-a973-4d3e-9e8c-d54830e907a0" (UID: "35e85b50-a973-4d3e-9e8c-d54830e907a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.342939 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.343309 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e85b50-a973-4d3e-9e8c-d54830e907a0-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.343419 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvqqk\" (UniqueName: \"kubernetes.io/projected/35e85b50-a973-4d3e-9e8c-d54830e907a0-kube-api-access-fvqqk\") on node \"crc\" DevicePath \"\"" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.681774 4940 generic.go:334] "Generic (PLEG): container finished" podID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerID="a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b" exitCode=0 Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.681820 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l9sdb" event={"ID":"35e85b50-a973-4d3e-9e8c-d54830e907a0","Type":"ContainerDied","Data":"a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b"} Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.681880 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l9sdb" event={"ID":"35e85b50-a973-4d3e-9e8c-d54830e907a0","Type":"ContainerDied","Data":"2d4c7c0e07ab7cf429414dd7809addb17f92b2230c49724c00c50c812ddd1084"} Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.681897 4940 scope.go:117] "RemoveContainer" containerID="a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.683407 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l9sdb" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.716240 4940 scope.go:117] "RemoveContainer" containerID="033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.725241 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l9sdb"] Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.737569 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l9sdb"] Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.746883 4940 scope.go:117] "RemoveContainer" containerID="574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.795418 4940 scope.go:117] "RemoveContainer" containerID="a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b" Nov 26 09:26:21 crc kubenswrapper[4940]: E1126 09:26:21.796259 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b\": container with ID starting with a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b not found: ID does not exist" containerID="a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.796307 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b"} err="failed to get container status \"a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b\": rpc error: code = NotFound desc = could not find container \"a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b\": container with ID starting with a7588abab7f46aab19d9e55d8dc8f6d4057fb8ca40332c0064af06655f7b679b not found: ID does not exist" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.796358 4940 scope.go:117] "RemoveContainer" containerID="033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030" Nov 26 09:26:21 crc kubenswrapper[4940]: E1126 09:26:21.796759 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030\": container with ID starting with 033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030 not found: ID does not exist" containerID="033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.796786 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030"} err="failed to get container status \"033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030\": rpc error: code = NotFound desc = could not find container \"033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030\": container with ID starting with 033f9175612d4b501c1fbbb25e98de7288e82e4aab7255fa5176977cfd4e8030 not found: ID does not exist" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.796802 4940 scope.go:117] "RemoveContainer" containerID="574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0" Nov 26 09:26:21 crc kubenswrapper[4940]: E1126 09:26:21.797158 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0\": container with ID starting with 574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0 not found: ID does not exist" containerID="574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0" Nov 26 09:26:21 crc kubenswrapper[4940]: I1126 09:26:21.797188 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0"} err="failed to get container status \"574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0\": rpc error: code = NotFound desc = could not find container \"574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0\": container with ID starting with 574cc2a7a8b795ef0e7dede972371b50ef056fe3ed6cc2fb00ca7aad9e7eb4c0 not found: ID does not exist" Nov 26 09:26:23 crc kubenswrapper[4940]: I1126 09:26:23.179159 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" path="/var/lib/kubelet/pods/35e85b50-a973-4d3e-9e8c-d54830e907a0/volumes" Nov 26 09:26:27 crc kubenswrapper[4940]: I1126 09:26:27.165361 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:26:27 crc kubenswrapper[4940]: E1126 09:26:27.166004 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:26:41 crc kubenswrapper[4940]: I1126 09:26:41.165755 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:26:41 crc kubenswrapper[4940]: E1126 09:26:41.166572 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:26:52 crc kubenswrapper[4940]: I1126 09:26:52.167091 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:26:53 crc kubenswrapper[4940]: I1126 09:26:53.053316 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"ed761410d93cf652e0b5993814b60fdb619a02b40f042d2f0335a6ab015bf4a4"} Nov 26 09:27:11 crc kubenswrapper[4940]: I1126 09:27:11.262710 4940 generic.go:334] "Generic (PLEG): container finished" podID="a96978df-fadc-461a-91ec-fe51f593b61a" containerID="1cab4da027ce234f378984cd03ec5acf193c21713f59d44c18687382803d52ac" exitCode=0 Nov 26 09:27:11 crc kubenswrapper[4940]: I1126 09:27:11.262791 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" 
event={"ID":"a96978df-fadc-461a-91ec-fe51f593b61a","Type":"ContainerDied","Data":"1cab4da027ce234f378984cd03ec5acf193c21713f59d44c18687382803d52ac"} Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.831349 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.940320 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ssh-key\") pod \"a96978df-fadc-461a-91ec-fe51f593b61a\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.940736 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-inventory\") pod \"a96978df-fadc-461a-91ec-fe51f593b61a\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.940897 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ceph\") pod \"a96978df-fadc-461a-91ec-fe51f593b61a\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.940936 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-secret-0\") pod \"a96978df-fadc-461a-91ec-fe51f593b61a\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.941019 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-combined-ca-bundle\") pod \"a96978df-fadc-461a-91ec-fe51f593b61a\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.941098 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcg5x\" (UniqueName: \"kubernetes.io/projected/a96978df-fadc-461a-91ec-fe51f593b61a-kube-api-access-pcg5x\") pod \"a96978df-fadc-461a-91ec-fe51f593b61a\" (UID: \"a96978df-fadc-461a-91ec-fe51f593b61a\") " Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.955339 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a96978df-fadc-461a-91ec-fe51f593b61a-kube-api-access-pcg5x" (OuterVolumeSpecName: "kube-api-access-pcg5x") pod "a96978df-fadc-461a-91ec-fe51f593b61a" (UID: "a96978df-fadc-461a-91ec-fe51f593b61a"). InnerVolumeSpecName "kube-api-access-pcg5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.956152 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "a96978df-fadc-461a-91ec-fe51f593b61a" (UID: "a96978df-fadc-461a-91ec-fe51f593b61a"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.956973 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ceph" (OuterVolumeSpecName: "ceph") pod "a96978df-fadc-461a-91ec-fe51f593b61a" (UID: "a96978df-fadc-461a-91ec-fe51f593b61a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.975205 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-inventory" (OuterVolumeSpecName: "inventory") pod "a96978df-fadc-461a-91ec-fe51f593b61a" (UID: "a96978df-fadc-461a-91ec-fe51f593b61a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.976159 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "a96978df-fadc-461a-91ec-fe51f593b61a" (UID: "a96978df-fadc-461a-91ec-fe51f593b61a"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:27:12 crc kubenswrapper[4940]: I1126 09:27:12.979185 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a96978df-fadc-461a-91ec-fe51f593b61a" (UID: "a96978df-fadc-461a-91ec-fe51f593b61a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.043217 4940 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.043246 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcg5x\" (UniqueName: \"kubernetes.io/projected/a96978df-fadc-461a-91ec-fe51f593b61a-kube-api-access-pcg5x\") on node \"crc\" DevicePath \"\"" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.043256 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.043267 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.043275 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.043284 4940 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/a96978df-fadc-461a-91ec-fe51f593b61a-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.290301 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" 
event={"ID":"a96978df-fadc-461a-91ec-fe51f593b61a","Type":"ContainerDied","Data":"4ff3b973368ad82fffc924acea5be56f37de91910616a619097f5743cb3bb445"} Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.290363 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ff3b973368ad82fffc924acea5be56f37de91910616a619097f5743cb3bb445" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.290486 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-6wckh" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.445708 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-8jmsb"] Nov 26 09:27:13 crc kubenswrapper[4940]: E1126 09:27:13.446570 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerName="extract-content" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.446604 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerName="extract-content" Nov 26 09:27:13 crc kubenswrapper[4940]: E1126 09:27:13.446646 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerName="registry-server" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.446662 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerName="registry-server" Nov 26 09:27:13 crc kubenswrapper[4940]: E1126 09:27:13.446739 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a96978df-fadc-461a-91ec-fe51f593b61a" containerName="libvirt-openstack-openstack-cell1" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.446759 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="a96978df-fadc-461a-91ec-fe51f593b61a" containerName="libvirt-openstack-openstack-cell1" Nov 26 09:27:13 crc kubenswrapper[4940]: E1126 09:27:13.446808 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerName="extract-utilities" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.446824 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerName="extract-utilities" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.447389 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="a96978df-fadc-461a-91ec-fe51f593b61a" containerName="libvirt-openstack-openstack-cell1" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.447477 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="35e85b50-a973-4d3e-9e8c-d54830e907a0" containerName="registry-server" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.449265 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.454078 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.454262 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.454466 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.454535 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.454621 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.454671 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.454881 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.458889 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-8jmsb"] Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.552289 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzqfs\" (UniqueName: \"kubernetes.io/projected/ddec3681-bc80-4513-bb8a-fd193de5f12f-kube-api-access-xzqfs\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.552343 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.552418 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.552629 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.552870 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ssh-key\") pod 
\"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.552957 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.553009 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-inventory\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.553085 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.553230 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.553284 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.553434 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ceph\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.655203 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.655285 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" 
(UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.655315 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.655830 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-inventory\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.655865 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.655900 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.656201 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.656250 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ceph\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.656328 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzqfs\" (UniqueName: \"kubernetes.io/projected/ddec3681-bc80-4513-bb8a-fd193de5f12f-kube-api-access-xzqfs\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.656353 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 
crc kubenswrapper[4940]: I1126 09:27:13.656399 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.657228 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.657265 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.658895 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.658906 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.659944 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.660949 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.661262 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ceph\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.662170 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.663208 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.673182 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-inventory\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.674745 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzqfs\" (UniqueName: \"kubernetes.io/projected/ddec3681-bc80-4513-bb8a-fd193de5f12f-kube-api-access-xzqfs\") pod \"nova-cell1-openstack-openstack-cell1-8jmsb\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:13 crc kubenswrapper[4940]: I1126 09:27:13.834056 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:27:14 crc kubenswrapper[4940]: I1126 09:27:14.396383 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-8jmsb"] Nov 26 09:27:15 crc kubenswrapper[4940]: I1126 09:27:15.316537 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" event={"ID":"ddec3681-bc80-4513-bb8a-fd193de5f12f","Type":"ContainerStarted","Data":"4393a227183290279bb77bdd9d20aefc7bc4d6cbedf8c1829406f2a4a85d3787"} Nov 26 09:27:15 crc kubenswrapper[4940]: I1126 09:27:15.316860 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" event={"ID":"ddec3681-bc80-4513-bb8a-fd193de5f12f","Type":"ContainerStarted","Data":"a4c6880b099c13afc293f893a22cf4ca451d966a9323d25ea55355267c7fcad7"} Nov 26 09:27:15 crc kubenswrapper[4940]: I1126 09:27:15.345405 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" podStartSLOduration=1.863889557 podStartE2EDuration="2.345363308s" podCreationTimestamp="2025-11-26 09:27:13 +0000 UTC" firstStartedPulling="2025-11-26 09:27:14.406184722 +0000 UTC m=+9135.926326341" lastFinishedPulling="2025-11-26 09:27:14.887658473 +0000 UTC m=+9136.407800092" observedRunningTime="2025-11-26 09:27:15.337329233 +0000 UTC m=+9136.857470862" watchObservedRunningTime="2025-11-26 09:27:15.345363308 +0000 UTC m=+9136.865504927" Nov 26 09:29:21 crc kubenswrapper[4940]: I1126 09:29:21.728623 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:29:21 crc 
kubenswrapper[4940]: I1126 09:29:21.729353 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:29:51 crc kubenswrapper[4940]: I1126 09:29:51.728142 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:29:51 crc kubenswrapper[4940]: I1126 09:29:51.728709 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.211048 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2"] Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.213871 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.221510 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.221543 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.238274 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2"] Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.329025 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tddrw\" (UniqueName: \"kubernetes.io/projected/6341ab6a-df09-4743-a2f4-443ea76ce8eb-kube-api-access-tddrw\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.329089 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6341ab6a-df09-4743-a2f4-443ea76ce8eb-config-volume\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.330699 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6341ab6a-df09-4743-a2f4-443ea76ce8eb-secret-volume\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.433176 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-tddrw\" (UniqueName: \"kubernetes.io/projected/6341ab6a-df09-4743-a2f4-443ea76ce8eb-kube-api-access-tddrw\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.433239 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6341ab6a-df09-4743-a2f4-443ea76ce8eb-config-volume\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.433363 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6341ab6a-df09-4743-a2f4-443ea76ce8eb-secret-volume\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.434490 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6341ab6a-df09-4743-a2f4-443ea76ce8eb-config-volume\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.446690 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6341ab6a-df09-4743-a2f4-443ea76ce8eb-secret-volume\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.451185 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tddrw\" (UniqueName: \"kubernetes.io/projected/6341ab6a-df09-4743-a2f4-443ea76ce8eb-kube-api-access-tddrw\") pod \"collect-profiles-29402490-fn2p2\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:00 crc kubenswrapper[4940]: I1126 09:30:00.547643 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:01 crc kubenswrapper[4940]: I1126 09:30:01.049463 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2"] Nov 26 09:30:01 crc kubenswrapper[4940]: W1126 09:30:01.052624 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6341ab6a_df09_4743_a2f4_443ea76ce8eb.slice/crio-ead54923616eb10dfa6c4414aec78d6131a1312fa0cdb106765706e82f6b3ba0 WatchSource:0}: Error finding container ead54923616eb10dfa6c4414aec78d6131a1312fa0cdb106765706e82f6b3ba0: Status 404 returned error can't find the container with id ead54923616eb10dfa6c4414aec78d6131a1312fa0cdb106765706e82f6b3ba0 Nov 26 09:30:01 crc kubenswrapper[4940]: I1126 09:30:01.371186 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" event={"ID":"6341ab6a-df09-4743-a2f4-443ea76ce8eb","Type":"ContainerStarted","Data":"2e024f13ebf448faa7dbc95e4b8d90dd408d1a75d8f809c8180eb2c0305043db"} Nov 26 09:30:01 crc kubenswrapper[4940]: I1126 09:30:01.371563 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" event={"ID":"6341ab6a-df09-4743-a2f4-443ea76ce8eb","Type":"ContainerStarted","Data":"ead54923616eb10dfa6c4414aec78d6131a1312fa0cdb106765706e82f6b3ba0"} Nov 26 09:30:01 crc kubenswrapper[4940]: I1126 09:30:01.410160 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" podStartSLOduration=1.410132749 podStartE2EDuration="1.410132749s" podCreationTimestamp="2025-11-26 09:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:30:01.398109586 +0000 UTC m=+9302.918251225" watchObservedRunningTime="2025-11-26 09:30:01.410132749 +0000 UTC m=+9302.930274368" Nov 26 09:30:02 crc kubenswrapper[4940]: I1126 09:30:02.386864 4940 generic.go:334] "Generic (PLEG): container finished" podID="6341ab6a-df09-4743-a2f4-443ea76ce8eb" containerID="2e024f13ebf448faa7dbc95e4b8d90dd408d1a75d8f809c8180eb2c0305043db" exitCode=0 Nov 26 09:30:02 crc kubenswrapper[4940]: I1126 09:30:02.386937 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" event={"ID":"6341ab6a-df09-4743-a2f4-443ea76ce8eb","Type":"ContainerDied","Data":"2e024f13ebf448faa7dbc95e4b8d90dd408d1a75d8f809c8180eb2c0305043db"} Nov 26 09:30:03 crc kubenswrapper[4940]: I1126 09:30:03.813272 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:03 crc kubenswrapper[4940]: I1126 09:30:03.913984 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tddrw\" (UniqueName: \"kubernetes.io/projected/6341ab6a-df09-4743-a2f4-443ea76ce8eb-kube-api-access-tddrw\") pod \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " Nov 26 09:30:03 crc kubenswrapper[4940]: I1126 09:30:03.914395 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6341ab6a-df09-4743-a2f4-443ea76ce8eb-config-volume\") pod \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " Nov 26 09:30:03 crc kubenswrapper[4940]: I1126 09:30:03.914445 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6341ab6a-df09-4743-a2f4-443ea76ce8eb-secret-volume\") pod \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\" (UID: \"6341ab6a-df09-4743-a2f4-443ea76ce8eb\") " Nov 26 09:30:03 crc kubenswrapper[4940]: I1126 09:30:03.919689 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6341ab6a-df09-4743-a2f4-443ea76ce8eb-config-volume" (OuterVolumeSpecName: "config-volume") pod "6341ab6a-df09-4743-a2f4-443ea76ce8eb" (UID: "6341ab6a-df09-4743-a2f4-443ea76ce8eb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:30:03 crc kubenswrapper[4940]: I1126 09:30:03.921325 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6341ab6a-df09-4743-a2f4-443ea76ce8eb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6341ab6a-df09-4743-a2f4-443ea76ce8eb" (UID: "6341ab6a-df09-4743-a2f4-443ea76ce8eb"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:03 crc kubenswrapper[4940]: I1126 09:30:03.934252 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6341ab6a-df09-4743-a2f4-443ea76ce8eb-kube-api-access-tddrw" (OuterVolumeSpecName: "kube-api-access-tddrw") pod "6341ab6a-df09-4743-a2f4-443ea76ce8eb" (UID: "6341ab6a-df09-4743-a2f4-443ea76ce8eb"). InnerVolumeSpecName "kube-api-access-tddrw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:30:04 crc kubenswrapper[4940]: I1126 09:30:04.017498 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tddrw\" (UniqueName: \"kubernetes.io/projected/6341ab6a-df09-4743-a2f4-443ea76ce8eb-kube-api-access-tddrw\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:04 crc kubenswrapper[4940]: I1126 09:30:04.017542 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6341ab6a-df09-4743-a2f4-443ea76ce8eb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:04 crc kubenswrapper[4940]: I1126 09:30:04.017556 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6341ab6a-df09-4743-a2f4-443ea76ce8eb-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:04 crc kubenswrapper[4940]: I1126 09:30:04.406448 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" event={"ID":"6341ab6a-df09-4743-a2f4-443ea76ce8eb","Type":"ContainerDied","Data":"ead54923616eb10dfa6c4414aec78d6131a1312fa0cdb106765706e82f6b3ba0"} Nov 26 09:30:04 crc kubenswrapper[4940]: I1126 09:30:04.406490 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ead54923616eb10dfa6c4414aec78d6131a1312fa0cdb106765706e82f6b3ba0" Nov 26 09:30:04 crc kubenswrapper[4940]: I1126 09:30:04.406541 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2" Nov 26 09:30:04 crc kubenswrapper[4940]: I1126 09:30:04.511563 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww"] Nov 26 09:30:04 crc kubenswrapper[4940]: I1126 09:30:04.523241 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402445-nmdww"] Nov 26 09:30:05 crc kubenswrapper[4940]: I1126 09:30:05.192102 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d53cfca0-f04e-482f-a536-6fc940329283" path="/var/lib/kubelet/pods/d53cfca0-f04e-482f-a536-6fc940329283/volumes" Nov 26 09:30:16 crc kubenswrapper[4940]: I1126 09:30:16.914420 4940 scope.go:117] "RemoveContainer" containerID="9e3961016e59236d040c4d49c63c58a5c259f9e70f1fcbf64ff0420e15542361" Nov 26 09:30:21 crc kubenswrapper[4940]: I1126 09:30:21.728882 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:30:21 crc kubenswrapper[4940]: I1126 09:30:21.729547 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:30:21 crc kubenswrapper[4940]: I1126 09:30:21.729623 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 09:30:21 crc kubenswrapper[4940]: I1126 09:30:21.730862 4940 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ed761410d93cf652e0b5993814b60fdb619a02b40f042d2f0335a6ab015bf4a4"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 09:30:21 crc kubenswrapper[4940]: I1126 09:30:21.730964 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://ed761410d93cf652e0b5993814b60fdb619a02b40f042d2f0335a6ab015bf4a4" gracePeriod=600 Nov 26 09:30:22 crc kubenswrapper[4940]: I1126 09:30:22.661739 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="ed761410d93cf652e0b5993814b60fdb619a02b40f042d2f0335a6ab015bf4a4" exitCode=0 Nov 26 09:30:22 crc kubenswrapper[4940]: I1126 09:30:22.661938 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"ed761410d93cf652e0b5993814b60fdb619a02b40f042d2f0335a6ab015bf4a4"} Nov 26 09:30:22 crc kubenswrapper[4940]: I1126 09:30:22.662342 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"} Nov 26 09:30:22 crc kubenswrapper[4940]: I1126 09:30:22.662371 4940 scope.go:117] "RemoveContainer" containerID="91c4c0d387aea5260bba11a560f85936983aea415284f7251ca7ef7784f1dce2" Nov 26 09:30:55 crc kubenswrapper[4940]: I1126 09:30:55.041453 4940 generic.go:334] "Generic (PLEG): container finished" podID="ddec3681-bc80-4513-bb8a-fd193de5f12f" containerID="4393a227183290279bb77bdd9d20aefc7bc4d6cbedf8c1829406f2a4a85d3787" exitCode=0 Nov 26 09:30:55 crc kubenswrapper[4940]: I1126 09:30:55.041544 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" event={"ID":"ddec3681-bc80-4513-bb8a-fd193de5f12f","Type":"ContainerDied","Data":"4393a227183290279bb77bdd9d20aefc7bc4d6cbedf8c1829406f2a4a85d3787"} Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.547512 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.708524 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ssh-key\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.708779 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-0\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.708940 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-0\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.709025 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ceph\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.709144 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-1\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.709248 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-combined-ca-bundle\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.709324 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-0\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.709430 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-1\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.709564 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzqfs\" (UniqueName: \"kubernetes.io/projected/ddec3681-bc80-4513-bb8a-fd193de5f12f-kube-api-access-xzqfs\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.709665 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-inventory\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.709755 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-1\") pod \"ddec3681-bc80-4513-bb8a-fd193de5f12f\" (UID: \"ddec3681-bc80-4513-bb8a-fd193de5f12f\") " Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.715744 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddec3681-bc80-4513-bb8a-fd193de5f12f-kube-api-access-xzqfs" (OuterVolumeSpecName: "kube-api-access-xzqfs") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "kube-api-access-xzqfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.716058 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ceph" (OuterVolumeSpecName: "ceph") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.724896 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.737913 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.738843 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.749102 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "nova-cells-global-config-1". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.749177 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.750890 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-inventory" (OuterVolumeSpecName: "inventory") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.761634 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.776496 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.785008 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "ddec3681-bc80-4513-bb8a-fd193de5f12f" (UID: "ddec3681-bc80-4513-bb8a-fd193de5f12f"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.813946 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814031 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814098 4940 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814124 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814145 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814163 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814180 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814200 4940 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814217 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzqfs\" (UniqueName: \"kubernetes.io/projected/ddec3681-bc80-4513-bb8a-fd193de5f12f-kube-api-access-xzqfs\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814235 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:56 crc kubenswrapper[4940]: I1126 09:30:56.814256 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ddec3681-bc80-4513-bb8a-fd193de5f12f-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.062911 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" event={"ID":"ddec3681-bc80-4513-bb8a-fd193de5f12f","Type":"ContainerDied","Data":"a4c6880b099c13afc293f893a22cf4ca451d966a9323d25ea55355267c7fcad7"} Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.063267 4940 pod_container_deletor.go:80] "Container not 
found in pod's containers" containerID="a4c6880b099c13afc293f893a22cf4ca451d966a9323d25ea55355267c7fcad7" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.062986 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-8jmsb" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.288719 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-8qd7v"] Nov 26 09:30:57 crc kubenswrapper[4940]: E1126 09:30:57.292200 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddec3681-bc80-4513-bb8a-fd193de5f12f" containerName="nova-cell1-openstack-openstack-cell1" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.292415 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddec3681-bc80-4513-bb8a-fd193de5f12f" containerName="nova-cell1-openstack-openstack-cell1" Nov 26 09:30:57 crc kubenswrapper[4940]: E1126 09:30:57.292519 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6341ab6a-df09-4743-a2f4-443ea76ce8eb" containerName="collect-profiles" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.292581 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6341ab6a-df09-4743-a2f4-443ea76ce8eb" containerName="collect-profiles" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.293322 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddec3681-bc80-4513-bb8a-fd193de5f12f" containerName="nova-cell1-openstack-openstack-cell1" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.293467 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6341ab6a-df09-4743-a2f4-443ea76ce8eb" containerName="collect-profiles" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.295304 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.299723 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.301445 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.302864 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.303285 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.303484 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.325732 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.325801 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.326716 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.327139 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-inventory\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.327185 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nd8sj\" (UniqueName: \"kubernetes.io/projected/9fc977d5-5151-447c-8006-0a318ab3b23e-kube-api-access-nd8sj\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.327264 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceph\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: 
\"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.327303 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.327468 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ssh-key\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.335551 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-8qd7v"] Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.428267 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.428375 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-inventory\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.428406 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nd8sj\" (UniqueName: \"kubernetes.io/projected/9fc977d5-5151-447c-8006-0a318ab3b23e-kube-api-access-nd8sj\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.428443 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceph\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.428466 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.428519 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ssh-key\") pod 
\"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.428569 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.428618 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.433776 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.433939 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.434325 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-inventory\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.434602 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.435117 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ssh-key\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.435296 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceph\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 
crc kubenswrapper[4940]: I1126 09:30:57.436874 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.449937 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nd8sj\" (UniqueName: \"kubernetes.io/projected/9fc977d5-5151-447c-8006-0a318ab3b23e-kube-api-access-nd8sj\") pod \"telemetry-openstack-openstack-cell1-8qd7v\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") " pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:57 crc kubenswrapper[4940]: I1126 09:30:57.624627 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" Nov 26 09:30:58 crc kubenswrapper[4940]: I1126 09:30:58.157957 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-8qd7v"] Nov 26 09:30:59 crc kubenswrapper[4940]: I1126 09:30:59.091079 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" event={"ID":"9fc977d5-5151-447c-8006-0a318ab3b23e","Type":"ContainerStarted","Data":"f7f73c6ba18c57b58d9f88344a19f0f410c90097bb2ed11294d58ad3416e5a1c"} Nov 26 09:30:59 crc kubenswrapper[4940]: I1126 09:30:59.091737 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" event={"ID":"9fc977d5-5151-447c-8006-0a318ab3b23e","Type":"ContainerStarted","Data":"9bc63b1a2d695120293028e3999c06c3c00f764d9fbed2dce9b2668380a6e4d6"} Nov 26 09:30:59 crc kubenswrapper[4940]: I1126 09:30:59.132020 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" podStartSLOduration=1.682776912 podStartE2EDuration="2.131992405s" podCreationTimestamp="2025-11-26 09:30:57 +0000 UTC" firstStartedPulling="2025-11-26 09:30:58.168949816 +0000 UTC m=+9359.689091435" lastFinishedPulling="2025-11-26 09:30:58.618165299 +0000 UTC m=+9360.138306928" observedRunningTime="2025-11-26 09:30:59.117905467 +0000 UTC m=+9360.638047106" watchObservedRunningTime="2025-11-26 09:30:59.131992405 +0000 UTC m=+9360.652134064" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.178518 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tnpk7"] Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.183133 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.192387 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tnpk7"] Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.239493 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z65pl\" (UniqueName: \"kubernetes.io/projected/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-kube-api-access-z65pl\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.239594 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-catalog-content\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.239688 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-utilities\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.341406 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-utilities\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.341512 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z65pl\" (UniqueName: \"kubernetes.io/projected/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-kube-api-access-z65pl\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.341603 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-catalog-content\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.342007 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-catalog-content\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.342257 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-utilities\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.362795 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z65pl\" (UniqueName: \"kubernetes.io/projected/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-kube-api-access-z65pl\") pod \"community-operators-tnpk7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:39 crc kubenswrapper[4940]: I1126 09:31:39.506319 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:40 crc kubenswrapper[4940]: I1126 09:31:40.752205 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tnpk7"] Nov 26 09:31:41 crc kubenswrapper[4940]: I1126 09:31:41.607461 4940 generic.go:334] "Generic (PLEG): container finished" podID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerID="9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624" exitCode=0 Nov 26 09:31:41 crc kubenswrapper[4940]: I1126 09:31:41.607511 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnpk7" event={"ID":"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7","Type":"ContainerDied","Data":"9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624"} Nov 26 09:31:41 crc kubenswrapper[4940]: I1126 09:31:41.607774 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnpk7" event={"ID":"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7","Type":"ContainerStarted","Data":"d72fef8ff360408122e615014680edeed7d57b6efd72cf1d1ea3a8f8a81825f0"} Nov 26 09:31:41 crc kubenswrapper[4940]: I1126 09:31:41.609466 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:31:43 crc kubenswrapper[4940]: I1126 09:31:43.633340 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnpk7" event={"ID":"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7","Type":"ContainerStarted","Data":"64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1"} Nov 26 09:31:44 crc kubenswrapper[4940]: I1126 09:31:44.645226 4940 generic.go:334] "Generic (PLEG): container finished" podID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerID="64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1" exitCode=0 Nov 26 09:31:44 crc kubenswrapper[4940]: I1126 09:31:44.645304 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnpk7" event={"ID":"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7","Type":"ContainerDied","Data":"64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1"} Nov 26 09:31:45 crc kubenswrapper[4940]: I1126 09:31:45.657436 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnpk7" event={"ID":"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7","Type":"ContainerStarted","Data":"776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41"} Nov 26 09:31:49 crc kubenswrapper[4940]: I1126 09:31:49.507250 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:49 crc kubenswrapper[4940]: I1126 09:31:49.507851 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:49 crc kubenswrapper[4940]: I1126 09:31:49.581267 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:49 crc 
kubenswrapper[4940]: I1126 09:31:49.610692 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tnpk7" podStartSLOduration=6.975804065 podStartE2EDuration="10.610664512s" podCreationTimestamp="2025-11-26 09:31:39 +0000 UTC" firstStartedPulling="2025-11-26 09:31:41.609202382 +0000 UTC m=+9403.129344001" lastFinishedPulling="2025-11-26 09:31:45.244062829 +0000 UTC m=+9406.764204448" observedRunningTime="2025-11-26 09:31:45.677778429 +0000 UTC m=+9407.197920048" watchObservedRunningTime="2025-11-26 09:31:49.610664512 +0000 UTC m=+9411.130806171" Nov 26 09:31:59 crc kubenswrapper[4940]: I1126 09:31:59.559464 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:31:59 crc kubenswrapper[4940]: I1126 09:31:59.637955 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tnpk7"] Nov 26 09:31:59 crc kubenswrapper[4940]: I1126 09:31:59.845624 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tnpk7" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerName="registry-server" containerID="cri-o://776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41" gracePeriod=2 Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.419209 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.504896 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z65pl\" (UniqueName: \"kubernetes.io/projected/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-kube-api-access-z65pl\") pod \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.504948 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-catalog-content\") pod \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.505145 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-utilities\") pod \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\" (UID: \"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7\") " Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.506541 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-utilities" (OuterVolumeSpecName: "utilities") pod "dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" (UID: "dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.515971 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-kube-api-access-z65pl" (OuterVolumeSpecName: "kube-api-access-z65pl") pod "dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" (UID: "dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7"). InnerVolumeSpecName "kube-api-access-z65pl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.587636 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" (UID: "dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.607697 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.607863 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z65pl\" (UniqueName: \"kubernetes.io/projected/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-kube-api-access-z65pl\") on node \"crc\" DevicePath \"\"" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.607881 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.858000 4940 generic.go:334] "Generic (PLEG): container finished" podID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerID="776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41" exitCode=0 Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.858078 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnpk7" event={"ID":"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7","Type":"ContainerDied","Data":"776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41"} Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.858127 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnpk7" event={"ID":"dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7","Type":"ContainerDied","Data":"d72fef8ff360408122e615014680edeed7d57b6efd72cf1d1ea3a8f8a81825f0"} Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.858152 4940 scope.go:117] "RemoveContainer" containerID="776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.858219 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tnpk7" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.896585 4940 scope.go:117] "RemoveContainer" containerID="64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.906823 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tnpk7"] Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.922804 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tnpk7"] Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.938606 4940 scope.go:117] "RemoveContainer" containerID="9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.982388 4940 scope.go:117] "RemoveContainer" containerID="776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41" Nov 26 09:32:00 crc kubenswrapper[4940]: E1126 09:32:00.982819 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41\": container with ID starting with 776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41 not found: ID does not exist" containerID="776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.982852 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41"} err="failed to get container status \"776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41\": rpc error: code = NotFound desc = could not find container \"776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41\": container with ID starting with 776b5a312ca8edfd352c38e7fe9f25fe735b8114d42c074d28d87514724dcc41 not found: ID does not exist" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.982875 4940 scope.go:117] "RemoveContainer" containerID="64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1" Nov 26 09:32:00 crc kubenswrapper[4940]: E1126 09:32:00.983893 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1\": container with ID starting with 64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1 not found: ID does not exist" containerID="64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.983939 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1"} err="failed to get container status \"64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1\": rpc error: code = NotFound desc = could not find container \"64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1\": container with ID starting with 64765ce356c99a2f48f4652464c5b8266ecbc8a68b3770d6ee62aebe9aab58f1 not found: ID does not exist" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.983963 4940 scope.go:117] "RemoveContainer" containerID="9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624" Nov 26 09:32:00 crc kubenswrapper[4940]: E1126 09:32:00.984176 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624\": container with ID starting with 9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624 not found: ID does not exist" containerID="9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624" Nov 26 09:32:00 crc kubenswrapper[4940]: I1126 09:32:00.984198 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624"} err="failed to get container status \"9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624\": rpc error: code = NotFound desc = could not find container \"9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624\": container with ID starting with 9101eb4d0e673646ddd4e9869fcc09dc528aa7544ff2fbfbc5e0f58571adb624 not found: ID does not exist" Nov 26 09:32:01 crc kubenswrapper[4940]: I1126 09:32:01.178711 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" path="/var/lib/kubelet/pods/dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7/volumes" Nov 26 09:32:51 crc kubenswrapper[4940]: I1126 09:32:51.727812 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:32:51 crc kubenswrapper[4940]: I1126 09:32:51.728392 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.807349 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p2d4l"] Nov 26 09:32:54 crc kubenswrapper[4940]: E1126 09:32:54.808096 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerName="extract-utilities" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.808109 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerName="extract-utilities" Nov 26 09:32:54 crc kubenswrapper[4940]: E1126 09:32:54.808117 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerName="registry-server" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.808124 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerName="registry-server" Nov 26 09:32:54 crc kubenswrapper[4940]: E1126 09:32:54.808153 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerName="extract-content" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.808159 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerName="extract-content" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.808382 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="dee6f174-4eb7-4c7a-a80b-fb8a5b1336c7" containerName="registry-server" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 
09:32:54.809905 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.825946 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p2d4l"] Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.920033 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-utilities\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.920308 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75qph\" (UniqueName: \"kubernetes.io/projected/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-kube-api-access-75qph\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:54 crc kubenswrapper[4940]: I1126 09:32:54.920660 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-catalog-content\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:55 crc kubenswrapper[4940]: I1126 09:32:55.022560 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-utilities\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:55 crc kubenswrapper[4940]: I1126 09:32:55.022665 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75qph\" (UniqueName: \"kubernetes.io/projected/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-kube-api-access-75qph\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:55 crc kubenswrapper[4940]: I1126 09:32:55.022739 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-catalog-content\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:55 crc kubenswrapper[4940]: I1126 09:32:55.023700 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-catalog-content\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:55 crc kubenswrapper[4940]: I1126 09:32:55.023708 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-utilities\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:55 crc kubenswrapper[4940]: I1126 09:32:55.041941 4940 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75qph\" (UniqueName: \"kubernetes.io/projected/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-kube-api-access-75qph\") pod \"redhat-operators-p2d4l\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:55 crc kubenswrapper[4940]: I1126 09:32:55.142733 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:32:55 crc kubenswrapper[4940]: I1126 09:32:55.589794 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p2d4l"] Nov 26 09:32:56 crc kubenswrapper[4940]: W1126 09:32:56.092505 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b2436d1_7450_4ff3_9ba8_b5d75b0f9bf9.slice/crio-97e8ebb50ed74411105c143711c94c496f88ae5e0b5d952bff2c72c5ef1701ed WatchSource:0}: Error finding container 97e8ebb50ed74411105c143711c94c496f88ae5e0b5d952bff2c72c5ef1701ed: Status 404 returned error can't find the container with id 97e8ebb50ed74411105c143711c94c496f88ae5e0b5d952bff2c72c5ef1701ed Nov 26 09:32:56 crc kubenswrapper[4940]: I1126 09:32:56.566974 4940 generic.go:334] "Generic (PLEG): container finished" podID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerID="9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195" exitCode=0 Nov 26 09:32:56 crc kubenswrapper[4940]: I1126 09:32:56.567055 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2d4l" event={"ID":"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9","Type":"ContainerDied","Data":"9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195"} Nov 26 09:32:56 crc kubenswrapper[4940]: I1126 09:32:56.567382 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2d4l" event={"ID":"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9","Type":"ContainerStarted","Data":"97e8ebb50ed74411105c143711c94c496f88ae5e0b5d952bff2c72c5ef1701ed"} Nov 26 09:32:58 crc kubenswrapper[4940]: I1126 09:32:58.591832 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2d4l" event={"ID":"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9","Type":"ContainerStarted","Data":"c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98"} Nov 26 09:33:03 crc kubenswrapper[4940]: I1126 09:33:03.688173 4940 generic.go:334] "Generic (PLEG): container finished" podID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerID="c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98" exitCode=0 Nov 26 09:33:03 crc kubenswrapper[4940]: I1126 09:33:03.688292 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2d4l" event={"ID":"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9","Type":"ContainerDied","Data":"c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98"} Nov 26 09:33:05 crc kubenswrapper[4940]: I1126 09:33:05.715011 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2d4l" event={"ID":"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9","Type":"ContainerStarted","Data":"102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163"} Nov 26 09:33:05 crc kubenswrapper[4940]: I1126 09:33:05.743441 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p2d4l" 
podStartSLOduration=2.903982188 podStartE2EDuration="11.74341975s" podCreationTimestamp="2025-11-26 09:32:54 +0000 UTC" firstStartedPulling="2025-11-26 09:32:56.569130411 +0000 UTC m=+9478.089272040" lastFinishedPulling="2025-11-26 09:33:05.408567983 +0000 UTC m=+9486.928709602" observedRunningTime="2025-11-26 09:33:05.736259282 +0000 UTC m=+9487.256400901" watchObservedRunningTime="2025-11-26 09:33:05.74341975 +0000 UTC m=+9487.263561369" Nov 26 09:33:15 crc kubenswrapper[4940]: I1126 09:33:15.143597 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:33:15 crc kubenswrapper[4940]: I1126 09:33:15.144262 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:33:16 crc kubenswrapper[4940]: I1126 09:33:16.235775 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p2d4l" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="registry-server" probeResult="failure" output=< Nov 26 09:33:16 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 09:33:16 crc kubenswrapper[4940]: > Nov 26 09:33:21 crc kubenswrapper[4940]: I1126 09:33:21.728708 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:33:21 crc kubenswrapper[4940]: I1126 09:33:21.729383 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:33:26 crc kubenswrapper[4940]: I1126 09:33:26.199003 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p2d4l" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="registry-server" probeResult="failure" output=< Nov 26 09:33:26 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 09:33:26 crc kubenswrapper[4940]: > Nov 26 09:33:35 crc kubenswrapper[4940]: I1126 09:33:35.228589 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:33:35 crc kubenswrapper[4940]: I1126 09:33:35.319623 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:33:35 crc kubenswrapper[4940]: I1126 09:33:35.484357 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p2d4l"] Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.160824 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p2d4l" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="registry-server" containerID="cri-o://102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163" gracePeriod=2 Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.758065 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.826613 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-utilities\") pod \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.826682 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-catalog-content\") pod \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.826779 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75qph\" (UniqueName: \"kubernetes.io/projected/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-kube-api-access-75qph\") pod \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\" (UID: \"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9\") " Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.827914 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-utilities" (OuterVolumeSpecName: "utilities") pod "5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" (UID: "5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.835400 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-kube-api-access-75qph" (OuterVolumeSpecName: "kube-api-access-75qph") pod "5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" (UID: "5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9"). InnerVolumeSpecName "kube-api-access-75qph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.932736 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.933282 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75qph\" (UniqueName: \"kubernetes.io/projected/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-kube-api-access-75qph\") on node \"crc\" DevicePath \"\"" Nov 26 09:33:37 crc kubenswrapper[4940]: I1126 09:33:37.954959 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" (UID: "5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.034878 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.196935 4940 generic.go:334] "Generic (PLEG): container finished" podID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerID="102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163" exitCode=0 Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.196989 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2d4l" event={"ID":"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9","Type":"ContainerDied","Data":"102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163"} Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.197019 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p2d4l" event={"ID":"5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9","Type":"ContainerDied","Data":"97e8ebb50ed74411105c143711c94c496f88ae5e0b5d952bff2c72c5ef1701ed"} Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.197053 4940 scope.go:117] "RemoveContainer" containerID="102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.197251 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p2d4l" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.234108 4940 scope.go:117] "RemoveContainer" containerID="c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.244141 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p2d4l"] Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.254135 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p2d4l"] Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.260307 4940 scope.go:117] "RemoveContainer" containerID="9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.312921 4940 scope.go:117] "RemoveContainer" containerID="102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163" Nov 26 09:33:38 crc kubenswrapper[4940]: E1126 09:33:38.313566 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163\": container with ID starting with 102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163 not found: ID does not exist" containerID="102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.313597 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163"} err="failed to get container status \"102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163\": rpc error: code = NotFound desc = could not find container \"102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163\": container with ID starting with 102d0977e9820f8bdfcc2268df6b266c0c085cfb872bb0af0c0082a12a1f7163 not found: ID does not exist" Nov 26 09:33:38 crc 
kubenswrapper[4940]: I1126 09:33:38.313617 4940 scope.go:117] "RemoveContainer" containerID="c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98" Nov 26 09:33:38 crc kubenswrapper[4940]: E1126 09:33:38.313992 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98\": container with ID starting with c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98 not found: ID does not exist" containerID="c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.314091 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98"} err="failed to get container status \"c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98\": rpc error: code = NotFound desc = could not find container \"c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98\": container with ID starting with c273b58eafcfd2b4a201279ec5a6f3ef941b7b6cf2085094330e82398a59de98 not found: ID does not exist" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.314139 4940 scope.go:117] "RemoveContainer" containerID="9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195" Nov 26 09:33:38 crc kubenswrapper[4940]: E1126 09:33:38.314600 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195\": container with ID starting with 9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195 not found: ID does not exist" containerID="9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195" Nov 26 09:33:38 crc kubenswrapper[4940]: I1126 09:33:38.314671 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195"} err="failed to get container status \"9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195\": rpc error: code = NotFound desc = could not find container \"9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195\": container with ID starting with 9ada057dd6313e4d7185c06fba8aa37f711228240fff9f5ac8f9ae75a1d59195 not found: ID does not exist" Nov 26 09:33:39 crc kubenswrapper[4940]: I1126 09:33:39.182349 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" path="/var/lib/kubelet/pods/5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9/volumes" Nov 26 09:33:51 crc kubenswrapper[4940]: I1126 09:33:51.728418 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:33:51 crc kubenswrapper[4940]: I1126 09:33:51.729165 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:33:51 crc kubenswrapper[4940]: I1126 09:33:51.729288 4940 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 09:33:51 crc kubenswrapper[4940]: I1126 09:33:51.730140 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 09:33:51 crc kubenswrapper[4940]: I1126 09:33:51.730225 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" gracePeriod=600 Nov 26 09:33:51 crc kubenswrapper[4940]: E1126 09:33:51.861155 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:33:52 crc kubenswrapper[4940]: I1126 09:33:52.385483 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" exitCode=0 Nov 26 09:33:52 crc kubenswrapper[4940]: I1126 09:33:52.385612 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"} Nov 26 09:33:52 crc kubenswrapper[4940]: I1126 09:33:52.386089 4940 scope.go:117] "RemoveContainer" containerID="ed761410d93cf652e0b5993814b60fdb619a02b40f042d2f0335a6ab015bf4a4" Nov 26 09:33:52 crc kubenswrapper[4940]: I1126 09:33:52.387026 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:33:52 crc kubenswrapper[4940]: E1126 09:33:52.387589 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:34:04 crc kubenswrapper[4940]: I1126 09:34:04.165651 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:34:04 crc kubenswrapper[4940]: E1126 09:34:04.166515 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:34:18 
crc kubenswrapper[4940]: I1126 09:34:18.165355 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:34:18 crc kubenswrapper[4940]: E1126 09:34:18.166222 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:34:30 crc kubenswrapper[4940]: I1126 09:34:30.166213 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:34:30 crc kubenswrapper[4940]: E1126 09:34:30.167448 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:34:42 crc kubenswrapper[4940]: I1126 09:34:42.166309 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:34:42 crc kubenswrapper[4940]: E1126 09:34:42.167247 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:34:53 crc kubenswrapper[4940]: I1126 09:34:53.166679 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:34:53 crc kubenswrapper[4940]: E1126 09:34:53.167461 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:35:07 crc kubenswrapper[4940]: I1126 09:35:07.166104 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:35:07 crc kubenswrapper[4940]: E1126 09:35:07.166955 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:35:21 crc kubenswrapper[4940]: I1126 09:35:21.165384 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:35:21 crc 
kubenswrapper[4940]: E1126 09:35:21.166163 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.316742 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ddb5w"]
Nov 26 09:35:27 crc kubenswrapper[4940]: E1126 09:35:27.317836 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="extract-content"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.317852 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="extract-content"
Nov 26 09:35:27 crc kubenswrapper[4940]: E1126 09:35:27.317895 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="registry-server"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.317903 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="registry-server"
Nov 26 09:35:27 crc kubenswrapper[4940]: E1126 09:35:27.317928 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="extract-utilities"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.317940 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="extract-utilities"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.318226 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b2436d1-7450-4ff3-9ba8-b5d75b0f9bf9" containerName="registry-server"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.320131 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.328954 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ddb5w"]
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.410065 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ghx2\" (UniqueName: \"kubernetes.io/projected/332b8025-07c7-491a-a0c9-2b691aaab9f5-kube-api-access-2ghx2\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.410133 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-catalog-content\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.410527 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-utilities\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.512812 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ghx2\" (UniqueName: \"kubernetes.io/projected/332b8025-07c7-491a-a0c9-2b691aaab9f5-kube-api-access-2ghx2\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.513314 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-catalog-content\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.513791 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-catalog-content\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.513915 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-utilities\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.514202 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-utilities\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.537885 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ghx2\" (UniqueName: \"kubernetes.io/projected/332b8025-07c7-491a-a0c9-2b691aaab9f5-kube-api-access-2ghx2\") pod \"certified-operators-ddb5w\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") " pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:27 crc kubenswrapper[4940]: I1126 09:35:27.661592 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:28 crc kubenswrapper[4940]: I1126 09:35:28.421654 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ddb5w"]
Nov 26 09:35:28 crc kubenswrapper[4940]: W1126 09:35:28.431099 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod332b8025_07c7_491a_a0c9_2b691aaab9f5.slice/crio-051caa6ee7b42ae0c929a2e05e43dc89509ce56e1d92e00e6f55393c25e148b0 WatchSource:0}: Error finding container 051caa6ee7b42ae0c929a2e05e43dc89509ce56e1d92e00e6f55393c25e148b0: Status 404 returned error can't find the container with id 051caa6ee7b42ae0c929a2e05e43dc89509ce56e1d92e00e6f55393c25e148b0
Nov 26 09:35:28 crc kubenswrapper[4940]: I1126 09:35:28.620467 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ddb5w" event={"ID":"332b8025-07c7-491a-a0c9-2b691aaab9f5","Type":"ContainerStarted","Data":"051caa6ee7b42ae0c929a2e05e43dc89509ce56e1d92e00e6f55393c25e148b0"}
Nov 26 09:35:29 crc kubenswrapper[4940]: I1126 09:35:29.640649 4940 generic.go:334] "Generic (PLEG): container finished" podID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerID="55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10" exitCode=0
Nov 26 09:35:29 crc kubenswrapper[4940]: I1126 09:35:29.640974 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ddb5w" event={"ID":"332b8025-07c7-491a-a0c9-2b691aaab9f5","Type":"ContainerDied","Data":"55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10"}
Nov 26 09:35:31 crc kubenswrapper[4940]: I1126 09:35:31.671718 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ddb5w" event={"ID":"332b8025-07c7-491a-a0c9-2b691aaab9f5","Type":"ContainerStarted","Data":"8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09"}
Nov 26 09:35:32 crc kubenswrapper[4940]: I1126 09:35:32.165437 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:35:32 crc kubenswrapper[4940]: E1126 09:35:32.166067 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:35:32 crc kubenswrapper[4940]: I1126 09:35:32.685802 4940 generic.go:334] "Generic (PLEG): container finished" podID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerID="8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09" exitCode=0
Nov 26 09:35:32 crc kubenswrapper[4940]: I1126 09:35:32.685887 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ddb5w" event={"ID":"332b8025-07c7-491a-a0c9-2b691aaab9f5","Type":"ContainerDied","Data":"8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09"}
Nov 26 09:35:33 crc kubenswrapper[4940]: I1126 09:35:33.698724 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ddb5w" event={"ID":"332b8025-07c7-491a-a0c9-2b691aaab9f5","Type":"ContainerStarted","Data":"0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd"}
Nov 26 09:35:37 crc kubenswrapper[4940]: I1126 09:35:37.662505 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:37 crc kubenswrapper[4940]: I1126 09:35:37.663360 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:37 crc kubenswrapper[4940]: I1126 09:35:37.727501 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:37 crc kubenswrapper[4940]: I1126 09:35:37.756709 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ddb5w" podStartSLOduration=6.9739384189999996 podStartE2EDuration="10.756690929s" podCreationTimestamp="2025-11-26 09:35:27 +0000 UTC" firstStartedPulling="2025-11-26 09:35:29.646018137 +0000 UTC m=+9631.166159796" lastFinishedPulling="2025-11-26 09:35:33.428770637 +0000 UTC m=+9634.948912306" observedRunningTime="2025-11-26 09:35:33.723222559 +0000 UTC m=+9635.243364198" watchObservedRunningTime="2025-11-26 09:35:37.756690929 +0000 UTC m=+9639.276832548"
Nov 26 09:35:44 crc kubenswrapper[4940]: I1126 09:35:44.166238 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:35:44 crc kubenswrapper[4940]: E1126 09:35:44.167153 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:35:47 crc kubenswrapper[4940]: I1126 09:35:47.714557 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:47 crc kubenswrapper[4940]: I1126 09:35:47.766432 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ddb5w"]
Nov 26 09:35:47 crc kubenswrapper[4940]: I1126 09:35:47.884767 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ddb5w" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerName="registry-server" containerID="cri-o://0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd" gracePeriod=2
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.486327 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.507189 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ghx2\" (UniqueName: \"kubernetes.io/projected/332b8025-07c7-491a-a0c9-2b691aaab9f5-kube-api-access-2ghx2\") pod \"332b8025-07c7-491a-a0c9-2b691aaab9f5\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") "
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.507558 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-utilities\") pod \"332b8025-07c7-491a-a0c9-2b691aaab9f5\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") "
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.507624 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-catalog-content\") pod \"332b8025-07c7-491a-a0c9-2b691aaab9f5\" (UID: \"332b8025-07c7-491a-a0c9-2b691aaab9f5\") "
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.515004 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-utilities" (OuterVolumeSpecName: "utilities") pod "332b8025-07c7-491a-a0c9-2b691aaab9f5" (UID: "332b8025-07c7-491a-a0c9-2b691aaab9f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.516020 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/332b8025-07c7-491a-a0c9-2b691aaab9f5-kube-api-access-2ghx2" (OuterVolumeSpecName: "kube-api-access-2ghx2") pod "332b8025-07c7-491a-a0c9-2b691aaab9f5" (UID: "332b8025-07c7-491a-a0c9-2b691aaab9f5"). InnerVolumeSpecName "kube-api-access-2ghx2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.588463 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "332b8025-07c7-491a-a0c9-2b691aaab9f5" (UID: "332b8025-07c7-491a-a0c9-2b691aaab9f5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.610147 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ghx2\" (UniqueName: \"kubernetes.io/projected/332b8025-07c7-491a-a0c9-2b691aaab9f5-kube-api-access-2ghx2\") on node \"crc\" DevicePath \"\""
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.610189 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.610201 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/332b8025-07c7-491a-a0c9-2b691aaab9f5-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.911179 4940 generic.go:334] "Generic (PLEG): container finished" podID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerID="0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd" exitCode=0
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.911607 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ddb5w" event={"ID":"332b8025-07c7-491a-a0c9-2b691aaab9f5","Type":"ContainerDied","Data":"0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd"}
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.911653 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ddb5w" event={"ID":"332b8025-07c7-491a-a0c9-2b691aaab9f5","Type":"ContainerDied","Data":"051caa6ee7b42ae0c929a2e05e43dc89509ce56e1d92e00e6f55393c25e148b0"}
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.911685 4940 scope.go:117] "RemoveContainer" containerID="0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd"
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.911915 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ddb5w"
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.970339 4940 scope.go:117] "RemoveContainer" containerID="8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09"
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.974890 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ddb5w"]
Nov 26 09:35:48 crc kubenswrapper[4940]: I1126 09:35:48.989581 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ddb5w"]
Nov 26 09:35:49 crc kubenswrapper[4940]: I1126 09:35:49.002096 4940 scope.go:117] "RemoveContainer" containerID="55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10"
Nov 26 09:35:49 crc kubenswrapper[4940]: I1126 09:35:49.046122 4940 scope.go:117] "RemoveContainer" containerID="0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd"
Nov 26 09:35:49 crc kubenswrapper[4940]: E1126 09:35:49.049749 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd\": container with ID starting with 0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd not found: ID does not exist" containerID="0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd"
Nov 26 09:35:49 crc kubenswrapper[4940]: I1126 09:35:49.049816 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd"} err="failed to get container status \"0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd\": rpc error: code = NotFound desc = could not find container \"0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd\": container with ID starting with 0208f9dd00b36149e2440ab192a0c3e7b4f012b21f7c9206ac364fa7666b14dd not found: ID does not exist"
Nov 26 09:35:49 crc kubenswrapper[4940]: I1126 09:35:49.049862 4940 scope.go:117] "RemoveContainer" containerID="8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09"
Nov 26 09:35:49 crc kubenswrapper[4940]: E1126 09:35:49.050501 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09\": container with ID starting with 8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09 not found: ID does not exist" containerID="8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09"
Nov 26 09:35:49 crc kubenswrapper[4940]: I1126 09:35:49.050562 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09"} err="failed to get container status \"8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09\": rpc error: code = NotFound desc = could not find container \"8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09\": container with ID starting with 8fc52fa1b97819f53cc1a479ee52c64782374753220b6507f75a88f389a68a09 not found: ID does not exist"
Nov 26 09:35:49 crc kubenswrapper[4940]: I1126 09:35:49.050609 4940 scope.go:117] "RemoveContainer" containerID="55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10"
Nov 26 09:35:49 crc kubenswrapper[4940]: E1126 09:35:49.051208 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10\": container with ID starting with 55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10 not found: ID does not exist" containerID="55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10"
Nov 26 09:35:49 crc kubenswrapper[4940]: I1126 09:35:49.051246 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10"} err="failed to get container status \"55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10\": rpc error: code = NotFound desc = could not find container \"55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10\": container with ID starting with 55d9c5ce050b2ec2dedfcebe14a1da6498d81e42494cc65605022699005aaf10 not found: ID does not exist"
Nov 26 09:35:49 crc kubenswrapper[4940]: I1126 09:35:49.178007 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" path="/var/lib/kubelet/pods/332b8025-07c7-491a-a0c9-2b691aaab9f5/volumes"
Nov 26 09:35:55 crc kubenswrapper[4940]: I1126 09:35:55.165544 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:35:55 crc kubenswrapper[4940]: E1126 09:35:55.166291 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:36:08 crc kubenswrapper[4940]: I1126 09:36:08.166111 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:36:08 crc kubenswrapper[4940]: E1126 09:36:08.167444 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:36:22 crc kubenswrapper[4940]: I1126 09:36:22.166100 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:36:22 crc kubenswrapper[4940]: E1126 09:36:22.166974 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:36:36 crc kubenswrapper[4940]: I1126 09:36:36.165953 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:36:36 crc kubenswrapper[4940]: E1126 09:36:36.166948 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.311424 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jvszb"]
Nov 26 09:36:42 crc kubenswrapper[4940]: E1126 09:36:42.312371 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerName="extract-content"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.312384 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerName="extract-content"
Nov 26 09:36:42 crc kubenswrapper[4940]: E1126 09:36:42.312429 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerName="registry-server"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.312435 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerName="registry-server"
Nov 26 09:36:42 crc kubenswrapper[4940]: E1126 09:36:42.312452 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerName="extract-utilities"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.312460 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerName="extract-utilities"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.312667 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="332b8025-07c7-491a-a0c9-2b691aaab9f5" containerName="registry-server"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.314233 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.334169 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jvszb"]
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.393313 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxp6c\" (UniqueName: \"kubernetes.io/projected/aad2bb86-19ea-4133-a2e1-72e7b1380023-kube-api-access-zxp6c\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.393619 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-utilities\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.393795 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-catalog-content\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.496087 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-utilities\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.496167 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-catalog-content\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.496266 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxp6c\" (UniqueName: \"kubernetes.io/projected/aad2bb86-19ea-4133-a2e1-72e7b1380023-kube-api-access-zxp6c\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.496726 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-utilities\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.496798 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-catalog-content\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.786978 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxp6c\" (UniqueName: \"kubernetes.io/projected/aad2bb86-19ea-4133-a2e1-72e7b1380023-kube-api-access-zxp6c\") pod \"redhat-marketplace-jvszb\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") " pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:42 crc kubenswrapper[4940]: I1126 09:36:42.941062 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:43 crc kubenswrapper[4940]: I1126 09:36:43.406602 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jvszb"]
Nov 26 09:36:43 crc kubenswrapper[4940]: I1126 09:36:43.631735 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jvszb" event={"ID":"aad2bb86-19ea-4133-a2e1-72e7b1380023","Type":"ContainerStarted","Data":"6cbadd86505c31a90c23abde0736ed5cfcc59208946656023d0b43a3e7f3536f"}
Nov 26 09:36:44 crc kubenswrapper[4940]: I1126 09:36:44.656715 4940 generic.go:334] "Generic (PLEG): container finished" podID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerID="f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550" exitCode=0
Nov 26 09:36:44 crc kubenswrapper[4940]: I1126 09:36:44.657124 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jvszb" event={"ID":"aad2bb86-19ea-4133-a2e1-72e7b1380023","Type":"ContainerDied","Data":"f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550"}
Nov 26 09:36:44 crc kubenswrapper[4940]: I1126 09:36:44.662721 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 09:36:46 crc kubenswrapper[4940]: I1126 09:36:46.702494 4940 generic.go:334] "Generic (PLEG): container finished" podID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerID="49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4" exitCode=0
Nov 26 09:36:46 crc kubenswrapper[4940]: I1126 09:36:46.702568 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jvszb" event={"ID":"aad2bb86-19ea-4133-a2e1-72e7b1380023","Type":"ContainerDied","Data":"49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4"}
Nov 26 09:36:47 crc kubenswrapper[4940]: I1126 09:36:47.714679 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jvszb" event={"ID":"aad2bb86-19ea-4133-a2e1-72e7b1380023","Type":"ContainerStarted","Data":"2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19"}
Nov 26 09:36:47 crc kubenswrapper[4940]: I1126 09:36:47.742855 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jvszb" podStartSLOduration=3.255921579 podStartE2EDuration="5.742834918s" podCreationTimestamp="2025-11-26 09:36:42 +0000 UTC" firstStartedPulling="2025-11-26 09:36:44.662076019 +0000 UTC m=+9706.182217658" lastFinishedPulling="2025-11-26 09:36:47.148989368 +0000 UTC m=+9708.669130997" observedRunningTime="2025-11-26 09:36:47.732570972 +0000 UTC m=+9709.252712591" watchObservedRunningTime="2025-11-26 09:36:47.742834918 +0000 UTC m=+9709.262976547"
Nov 26 09:36:49 crc kubenswrapper[4940]: I1126 09:36:49.176732 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:36:49 crc kubenswrapper[4940]: E1126 09:36:49.177719 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:36:52 crc kubenswrapper[4940]: I1126 09:36:52.941548 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:52 crc kubenswrapper[4940]: I1126 09:36:52.942363 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:53 crc kubenswrapper[4940]: I1126 09:36:53.020521 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:53 crc kubenswrapper[4940]: I1126 09:36:53.854862 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:53 crc kubenswrapper[4940]: I1126 09:36:53.917519 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jvszb"]
Nov 26 09:36:55 crc kubenswrapper[4940]: I1126 09:36:55.805760 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jvszb" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerName="registry-server" containerID="cri-o://2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19" gracePeriod=2
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.322135 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.412736 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-catalog-content\") pod \"aad2bb86-19ea-4133-a2e1-72e7b1380023\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") "
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.412809 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-utilities\") pod \"aad2bb86-19ea-4133-a2e1-72e7b1380023\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") "
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.412922 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxp6c\" (UniqueName: \"kubernetes.io/projected/aad2bb86-19ea-4133-a2e1-72e7b1380023-kube-api-access-zxp6c\") pod \"aad2bb86-19ea-4133-a2e1-72e7b1380023\" (UID: \"aad2bb86-19ea-4133-a2e1-72e7b1380023\") "
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.414930 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-utilities" (OuterVolumeSpecName: "utilities") pod "aad2bb86-19ea-4133-a2e1-72e7b1380023" (UID: "aad2bb86-19ea-4133-a2e1-72e7b1380023"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.422725 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aad2bb86-19ea-4133-a2e1-72e7b1380023-kube-api-access-zxp6c" (OuterVolumeSpecName: "kube-api-access-zxp6c") pod "aad2bb86-19ea-4133-a2e1-72e7b1380023" (UID: "aad2bb86-19ea-4133-a2e1-72e7b1380023"). InnerVolumeSpecName "kube-api-access-zxp6c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.434890 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aad2bb86-19ea-4133-a2e1-72e7b1380023" (UID: "aad2bb86-19ea-4133-a2e1-72e7b1380023"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.515067 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxp6c\" (UniqueName: \"kubernetes.io/projected/aad2bb86-19ea-4133-a2e1-72e7b1380023-kube-api-access-zxp6c\") on node \"crc\" DevicePath \"\""
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.515113 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.515127 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aad2bb86-19ea-4133-a2e1-72e7b1380023-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.815276 4940 generic.go:334] "Generic (PLEG): container finished" podID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerID="2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19" exitCode=0
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.815375 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jvszb"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.815378 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jvszb" event={"ID":"aad2bb86-19ea-4133-a2e1-72e7b1380023","Type":"ContainerDied","Data":"2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19"}
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.815691 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jvszb" event={"ID":"aad2bb86-19ea-4133-a2e1-72e7b1380023","Type":"ContainerDied","Data":"6cbadd86505c31a90c23abde0736ed5cfcc59208946656023d0b43a3e7f3536f"}
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.815713 4940 scope.go:117] "RemoveContainer" containerID="2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.849476 4940 scope.go:117] "RemoveContainer" containerID="49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.868115 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jvszb"]
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.880527 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jvszb"]
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.891083 4940 scope.go:117] "RemoveContainer" containerID="f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.924471 4940 scope.go:117] "RemoveContainer" containerID="2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19"
Nov 26 09:36:56 crc kubenswrapper[4940]: E1126 09:36:56.924956 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19\": container with ID starting with 2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19 not found: ID does not exist" containerID="2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.924998 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19"} err="failed to get container status \"2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19\": rpc error: code = NotFound desc = could not find container \"2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19\": container with ID starting with 2d676378c4f9d748a76679d7477a4df415ceb5513a8af801c308aa53c5affc19 not found: ID does not exist"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.925026 4940 scope.go:117] "RemoveContainer" containerID="49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4"
Nov 26 09:36:56 crc kubenswrapper[4940]: E1126 09:36:56.925751 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4\": container with ID starting with 49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4 not found: ID does not exist" containerID="49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.925783 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4"} err="failed to get container status \"49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4\": rpc error: code = NotFound desc = could not find container \"49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4\": container with ID starting with 49fc859d56ea40d5543618fb00d5a66eeafce4c7b693f27856e7d0973bf866c4 not found: ID does not exist"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.925806 4940 scope.go:117] "RemoveContainer" containerID="f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550"
Nov 26 09:36:56 crc kubenswrapper[4940]: E1126 09:36:56.926180 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550\": container with ID starting with f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550 not found: ID does not exist" containerID="f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550"
Nov 26 09:36:56 crc kubenswrapper[4940]: I1126 09:36:56.926220 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550"} err="failed to get container status \"f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550\": rpc error: code = NotFound desc = could not find container \"f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550\": container with ID starting with f3fc8f6f5ce229ccc659e195a78517236c6c12171091200f189fe7210171e550 not found: ID does not exist"
Nov 26 09:36:57 crc kubenswrapper[4940]: I1126 09:36:57.180613 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" path="/var/lib/kubelet/pods/aad2bb86-19ea-4133-a2e1-72e7b1380023/volumes"
Nov 26 09:37:03 crc kubenswrapper[4940]: I1126 09:37:03.165327 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:37:03 crc kubenswrapper[4940]: E1126 09:37:03.166086 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:37:16 crc kubenswrapper[4940]: I1126 09:37:16.165615 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:37:16 crc kubenswrapper[4940]: E1126 09:37:16.166793 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:37:30 crc kubenswrapper[4940]: I1126 09:37:30.166280 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:37:30 crc kubenswrapper[4940]: E1126 09:37:30.167238 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:37:45 crc kubenswrapper[4940]: I1126 09:37:45.165932 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:37:45 crc kubenswrapper[4940]: E1126 09:37:45.167244 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:37:56 crc kubenswrapper[4940]: I1126 09:37:56.171242 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e"
Nov 26 09:37:56 crc kubenswrapper[4940]: E1126 09:37:56.172334 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd"
Nov 26 09:38:05 crc kubenswrapper[4940]: I1126 09:38:05.245916 4940 generic.go:334] "Generic (PLEG): container finished" podID="9fc977d5-5151-447c-8006-0a318ab3b23e" containerID="f7f73c6ba18c57b58d9f88344a19f0f410c90097bb2ed11294d58ad3416e5a1c" exitCode=0
Nov 26 09:38:05 crc kubenswrapper[4940]: I1126 09:38:05.246021 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" event={"ID":"9fc977d5-5151-447c-8006-0a318ab3b23e","Type":"ContainerDied","Data":"f7f73c6ba18c57b58d9f88344a19f0f410c90097bb2ed11294d58ad3416e5a1c"}
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.768341 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v"
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.869578 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-inventory\") pod \"9fc977d5-5151-447c-8006-0a318ab3b23e\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") "
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.870231 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nd8sj\" (UniqueName: \"kubernetes.io/projected/9fc977d5-5151-447c-8006-0a318ab3b23e-kube-api-access-nd8sj\") pod \"9fc977d5-5151-447c-8006-0a318ab3b23e\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") "
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.870281 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-1\") pod \"9fc977d5-5151-447c-8006-0a318ab3b23e\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") "
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.870314 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-0\") pod \"9fc977d5-5151-447c-8006-0a318ab3b23e\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") "
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.870354 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ssh-key\") pod \"9fc977d5-5151-447c-8006-0a318ab3b23e\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") "
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.870434 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-2\") pod \"9fc977d5-5151-447c-8006-0a318ab3b23e\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") "
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.870459 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-telemetry-combined-ca-bundle\") pod \"9fc977d5-5151-447c-8006-0a318ab3b23e\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") "
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.870510 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceph\") pod \"9fc977d5-5151-447c-8006-0a318ab3b23e\" (UID: \"9fc977d5-5151-447c-8006-0a318ab3b23e\") "
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.875256 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceph" (OuterVolumeSpecName: "ceph") pod "9fc977d5-5151-447c-8006-0a318ab3b23e" (UID: "9fc977d5-5151-447c-8006-0a318ab3b23e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.876596 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fc977d5-5151-447c-8006-0a318ab3b23e-kube-api-access-nd8sj" (OuterVolumeSpecName: "kube-api-access-nd8sj") pod "9fc977d5-5151-447c-8006-0a318ab3b23e" (UID: "9fc977d5-5151-447c-8006-0a318ab3b23e"). InnerVolumeSpecName "kube-api-access-nd8sj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.882315 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "9fc977d5-5151-447c-8006-0a318ab3b23e" (UID: "9fc977d5-5151-447c-8006-0a318ab3b23e"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.898724 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "9fc977d5-5151-447c-8006-0a318ab3b23e" (UID: "9fc977d5-5151-447c-8006-0a318ab3b23e"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.898741 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "9fc977d5-5151-447c-8006-0a318ab3b23e" (UID: "9fc977d5-5151-447c-8006-0a318ab3b23e"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.900149 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-inventory" (OuterVolumeSpecName: "inventory") pod "9fc977d5-5151-447c-8006-0a318ab3b23e" (UID: "9fc977d5-5151-447c-8006-0a318ab3b23e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.900235 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "9fc977d5-5151-447c-8006-0a318ab3b23e" (UID: "9fc977d5-5151-447c-8006-0a318ab3b23e"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.904399 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9fc977d5-5151-447c-8006-0a318ab3b23e" (UID: "9fc977d5-5151-447c-8006-0a318ab3b23e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.973183 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceph\") on node \"crc\" DevicePath \"\""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.973438 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.973449 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nd8sj\" (UniqueName: \"kubernetes.io/projected/9fc977d5-5151-447c-8006-0a318ab3b23e-kube-api-access-nd8sj\") on node \"crc\" DevicePath \"\""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.973458 4940 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.973468 4940 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.973479 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.973488 4940 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Nov 26 09:38:06 crc kubenswrapper[4940]: I1126 09:38:06.973500 4940 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fc977d5-5151-447c-8006-0a318ab3b23e-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.286719 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v" event={"ID":"9fc977d5-5151-447c-8006-0a318ab3b23e","Type":"ContainerDied","Data":"9bc63b1a2d695120293028e3999c06c3c00f764d9fbed2dce9b2668380a6e4d6"}
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.286770 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bc63b1a2d695120293028e3999c06c3c00f764d9fbed2dce9b2668380a6e4d6"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.286846 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-8qd7v"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.405304 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"]
Nov 26 09:38:07 crc kubenswrapper[4940]: E1126 09:38:07.405821 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fc977d5-5151-447c-8006-0a318ab3b23e" containerName="telemetry-openstack-openstack-cell1"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.405846 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fc977d5-5151-447c-8006-0a318ab3b23e" containerName="telemetry-openstack-openstack-cell1"
Nov 26 09:38:07 crc kubenswrapper[4940]: E1126 09:38:07.405890 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerName="extract-content"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.405900 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerName="extract-content"
Nov 26 09:38:07 crc kubenswrapper[4940]: E1126 09:38:07.405922 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerName="extract-utilities"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.405931 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerName="extract-utilities"
Nov 26 09:38:07 crc kubenswrapper[4940]: E1126 09:38:07.405947 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerName="registry-server"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.405955 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerName="registry-server"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.406312 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fc977d5-5151-447c-8006-0a318ab3b23e" containerName="telemetry-openstack-openstack-cell1"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.406348 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="aad2bb86-19ea-4133-a2e1-72e7b1380023" containerName="registry-server"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.407164 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.409823 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.413958 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.414822 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.415349 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.415697 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.420683 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"]
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.588566 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.588648 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7hrs\" (UniqueName: \"kubernetes.io/projected/af672acf-5122-4828-9aa1-ec7921df77ec-kube-api-access-h7hrs\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.588679 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.588711 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.588751 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.589055 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.691032 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.691134 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7hrs\" (UniqueName: \"kubernetes.io/projected/af672acf-5122-4828-9aa1-ec7921df77ec-kube-api-access-h7hrs\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.691163 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.691189 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.691212 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.692165 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.696732 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.699142 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.704539 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.706016 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.708717 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7hrs\" (UniqueName: \"kubernetes.io/projected/af672acf-5122-4828-9aa1-ec7921df77ec-kube-api-access-h7hrs\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.710067 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-sx8hp\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"
Nov 26 09:38:07 crc kubenswrapper[4940]: I1126 09:38:07.727145 4940 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp" Nov 26 09:38:08 crc kubenswrapper[4940]: I1126 09:38:08.368783 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-sx8hp"] Nov 26 09:38:08 crc kubenswrapper[4940]: W1126 09:38:08.373194 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf672acf_5122_4828_9aa1_ec7921df77ec.slice/crio-a8b56dff47d5edc2511a6d8d1c2dad238e2e8ba5720715306afe1df1dfd8314f WatchSource:0}: Error finding container a8b56dff47d5edc2511a6d8d1c2dad238e2e8ba5720715306afe1df1dfd8314f: Status 404 returned error can't find the container with id a8b56dff47d5edc2511a6d8d1c2dad238e2e8ba5720715306afe1df1dfd8314f Nov 26 09:38:09 crc kubenswrapper[4940]: I1126 09:38:09.314009 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp" event={"ID":"af672acf-5122-4828-9aa1-ec7921df77ec","Type":"ContainerStarted","Data":"e323b244647b17c94496c3332822c9e0b11dfdfbf47f8ec843547e8fde7980ef"} Nov 26 09:38:09 crc kubenswrapper[4940]: I1126 09:38:09.314340 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp" event={"ID":"af672acf-5122-4828-9aa1-ec7921df77ec","Type":"ContainerStarted","Data":"a8b56dff47d5edc2511a6d8d1c2dad238e2e8ba5720715306afe1df1dfd8314f"} Nov 26 09:38:09 crc kubenswrapper[4940]: I1126 09:38:09.352404 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp" podStartSLOduration=1.892293409 podStartE2EDuration="2.352384312s" podCreationTimestamp="2025-11-26 09:38:07 +0000 UTC" firstStartedPulling="2025-11-26 09:38:08.375541946 +0000 UTC m=+9789.895683575" lastFinishedPulling="2025-11-26 09:38:08.835632849 +0000 UTC m=+9790.355774478" observedRunningTime="2025-11-26 09:38:09.3362674 +0000 UTC m=+9790.856409059" watchObservedRunningTime="2025-11-26 09:38:09.352384312 +0000 UTC m=+9790.872525931" Nov 26 09:38:11 crc kubenswrapper[4940]: I1126 09:38:11.165783 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:38:11 crc kubenswrapper[4940]: E1126 09:38:11.166708 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:38:24 crc kubenswrapper[4940]: I1126 09:38:24.166289 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:38:24 crc kubenswrapper[4940]: E1126 09:38:24.167430 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:38:36 crc kubenswrapper[4940]: I1126 09:38:36.166717 4940 scope.go:117] 
"RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:38:36 crc kubenswrapper[4940]: E1126 09:38:36.167432 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:38:48 crc kubenswrapper[4940]: I1126 09:38:48.165410 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:38:48 crc kubenswrapper[4940]: E1126 09:38:48.166164 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:38:55 crc kubenswrapper[4940]: I1126 09:38:55.914354 4940 generic.go:334] "Generic (PLEG): container finished" podID="af672acf-5122-4828-9aa1-ec7921df77ec" containerID="e323b244647b17c94496c3332822c9e0b11dfdfbf47f8ec843547e8fde7980ef" exitCode=0 Nov 26 09:38:55 crc kubenswrapper[4940]: I1126 09:38:55.914420 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp" event={"ID":"af672acf-5122-4828-9aa1-ec7921df77ec","Type":"ContainerDied","Data":"e323b244647b17c94496c3332822c9e0b11dfdfbf47f8ec843547e8fde7980ef"} Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.427866 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.599638 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-inventory\") pod \"af672acf-5122-4828-9aa1-ec7921df77ec\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.599918 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-agent-neutron-config-0\") pod \"af672acf-5122-4828-9aa1-ec7921df77ec\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.599973 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7hrs\" (UniqueName: \"kubernetes.io/projected/af672acf-5122-4828-9aa1-ec7921df77ec-kube-api-access-h7hrs\") pod \"af672acf-5122-4828-9aa1-ec7921df77ec\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.600089 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-combined-ca-bundle\") pod \"af672acf-5122-4828-9aa1-ec7921df77ec\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.600117 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ceph\") pod \"af672acf-5122-4828-9aa1-ec7921df77ec\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.600225 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ssh-key\") pod \"af672acf-5122-4828-9aa1-ec7921df77ec\" (UID: \"af672acf-5122-4828-9aa1-ec7921df77ec\") " Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.604825 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ceph" (OuterVolumeSpecName: "ceph") pod "af672acf-5122-4828-9aa1-ec7921df77ec" (UID: "af672acf-5122-4828-9aa1-ec7921df77ec"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.614669 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "af672acf-5122-4828-9aa1-ec7921df77ec" (UID: "af672acf-5122-4828-9aa1-ec7921df77ec"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.615892 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af672acf-5122-4828-9aa1-ec7921df77ec-kube-api-access-h7hrs" (OuterVolumeSpecName: "kube-api-access-h7hrs") pod "af672acf-5122-4828-9aa1-ec7921df77ec" (UID: "af672acf-5122-4828-9aa1-ec7921df77ec"). InnerVolumeSpecName "kube-api-access-h7hrs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.639251 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-sriov-agent-neutron-config-0") pod "af672acf-5122-4828-9aa1-ec7921df77ec" (UID: "af672acf-5122-4828-9aa1-ec7921df77ec"). InnerVolumeSpecName "neutron-sriov-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.639457 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-inventory" (OuterVolumeSpecName: "inventory") pod "af672acf-5122-4828-9aa1-ec7921df77ec" (UID: "af672acf-5122-4828-9aa1-ec7921df77ec"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.668254 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "af672acf-5122-4828-9aa1-ec7921df77ec" (UID: "af672acf-5122-4828-9aa1-ec7921df77ec"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.702823 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7hrs\" (UniqueName: \"kubernetes.io/projected/af672acf-5122-4828-9aa1-ec7921df77ec-kube-api-access-h7hrs\") on node \"crc\" DevicePath \"\"" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.702856 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.702867 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.702878 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.702887 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.702898 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/af672acf-5122-4828-9aa1-ec7921df77ec-neutron-sriov-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.941598 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp" Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.941583 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-sx8hp" event={"ID":"af672acf-5122-4828-9aa1-ec7921df77ec","Type":"ContainerDied","Data":"a8b56dff47d5edc2511a6d8d1c2dad238e2e8ba5720715306afe1df1dfd8314f"} Nov 26 09:38:57 crc kubenswrapper[4940]: I1126 09:38:57.941748 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a8b56dff47d5edc2511a6d8d1c2dad238e2e8ba5720715306afe1df1dfd8314f" Nov 26 09:38:58 crc kubenswrapper[4940]: E1126 09:38:58.021222 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf672acf_5122_4828_9aa1_ec7921df77ec.slice\": RecentStats: unable to find data in memory cache]" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.068228 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-s5g27"] Nov 26 09:38:58 crc kubenswrapper[4940]: E1126 09:38:58.069192 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af672acf-5122-4828-9aa1-ec7921df77ec" containerName="neutron-sriov-openstack-openstack-cell1" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.069212 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="af672acf-5122-4828-9aa1-ec7921df77ec" containerName="neutron-sriov-openstack-openstack-cell1" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.069508 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="af672acf-5122-4828-9aa1-ec7921df77ec" containerName="neutron-sriov-openstack-openstack-cell1" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.070279 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.071842 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-dhcp-agent-neutron-config" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.072946 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.073775 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.073872 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.074140 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.088418 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-s5g27"] Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.217579 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.217677 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.217812 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.217892 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.218310 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6tcm\" (UniqueName: \"kubernetes.io/projected/f0ddfc1d-2654-4502-8476-737a6675dc35-kube-api-access-g6tcm\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.218384 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.320459 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.320525 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.320651 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.320708 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.320825 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6tcm\" (UniqueName: \"kubernetes.io/projected/f0ddfc1d-2654-4502-8476-737a6675dc35-kube-api-access-g6tcm\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.320879 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.325672 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.326599 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: 
\"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.327276 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.332813 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.334823 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.344361 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6tcm\" (UniqueName: \"kubernetes.io/projected/f0ddfc1d-2654-4502-8476-737a6675dc35-kube-api-access-g6tcm\") pod \"neutron-dhcp-openstack-openstack-cell1-s5g27\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:58 crc kubenswrapper[4940]: I1126 09:38:58.399923 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:38:59 crc kubenswrapper[4940]: I1126 09:38:59.030988 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-s5g27"] Nov 26 09:38:59 crc kubenswrapper[4940]: I1126 09:38:59.550743 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:38:59 crc kubenswrapper[4940]: I1126 09:38:59.965820 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" event={"ID":"f0ddfc1d-2654-4502-8476-737a6675dc35","Type":"ContainerStarted","Data":"13f6ebd8d23ded1fd14af5483295c21411ad1f1f577199294c0e62c657605edc"} Nov 26 09:38:59 crc kubenswrapper[4940]: I1126 09:38:59.966220 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" event={"ID":"f0ddfc1d-2654-4502-8476-737a6675dc35","Type":"ContainerStarted","Data":"875eba93c55ef173040126e949ce03524e7a97d55e6ceca6899c79283e3dddf4"} Nov 26 09:38:59 crc kubenswrapper[4940]: I1126 09:38:59.999229 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" podStartSLOduration=1.486545662 podStartE2EDuration="1.999210666s" podCreationTimestamp="2025-11-26 09:38:58 +0000 UTC" firstStartedPulling="2025-11-26 09:38:59.035685353 +0000 UTC m=+9840.555826972" lastFinishedPulling="2025-11-26 09:38:59.548350327 +0000 UTC m=+9841.068491976" observedRunningTime="2025-11-26 09:38:59.988848346 +0000 UTC m=+9841.508989965" watchObservedRunningTime="2025-11-26 09:38:59.999210666 +0000 UTC m=+9841.519352285" Nov 26 09:39:03 crc kubenswrapper[4940]: I1126 09:39:03.165840 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:39:04 crc kubenswrapper[4940]: I1126 09:39:04.020061 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"47d5d2db4ba968e85e34ad466f92d109f867faaa638ca2b2990de6899520ee90"} Nov 26 09:40:03 crc kubenswrapper[4940]: I1126 09:40:03.766858 4940 generic.go:334] "Generic (PLEG): container finished" podID="f0ddfc1d-2654-4502-8476-737a6675dc35" containerID="13f6ebd8d23ded1fd14af5483295c21411ad1f1f577199294c0e62c657605edc" exitCode=0 Nov 26 09:40:03 crc kubenswrapper[4940]: I1126 09:40:03.766951 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" event={"ID":"f0ddfc1d-2654-4502-8476-737a6675dc35","Type":"ContainerDied","Data":"13f6ebd8d23ded1fd14af5483295c21411ad1f1f577199294c0e62c657605edc"} Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.303343 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.476315 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ssh-key\") pod \"f0ddfc1d-2654-4502-8476-737a6675dc35\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.476381 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-agent-neutron-config-0\") pod \"f0ddfc1d-2654-4502-8476-737a6675dc35\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.476483 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-inventory\") pod \"f0ddfc1d-2654-4502-8476-737a6675dc35\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.476527 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6tcm\" (UniqueName: \"kubernetes.io/projected/f0ddfc1d-2654-4502-8476-737a6675dc35-kube-api-access-g6tcm\") pod \"f0ddfc1d-2654-4502-8476-737a6675dc35\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.476555 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-combined-ca-bundle\") pod \"f0ddfc1d-2654-4502-8476-737a6675dc35\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.476580 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ceph\") pod \"f0ddfc1d-2654-4502-8476-737a6675dc35\" (UID: \"f0ddfc1d-2654-4502-8476-737a6675dc35\") " Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.482466 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ceph" (OuterVolumeSpecName: "ceph") pod "f0ddfc1d-2654-4502-8476-737a6675dc35" (UID: "f0ddfc1d-2654-4502-8476-737a6675dc35"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.487180 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "f0ddfc1d-2654-4502-8476-737a6675dc35" (UID: "f0ddfc1d-2654-4502-8476-737a6675dc35"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.487571 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0ddfc1d-2654-4502-8476-737a6675dc35-kube-api-access-g6tcm" (OuterVolumeSpecName: "kube-api-access-g6tcm") pod "f0ddfc1d-2654-4502-8476-737a6675dc35" (UID: "f0ddfc1d-2654-4502-8476-737a6675dc35"). InnerVolumeSpecName "kube-api-access-g6tcm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.511412 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-inventory" (OuterVolumeSpecName: "inventory") pod "f0ddfc1d-2654-4502-8476-737a6675dc35" (UID: "f0ddfc1d-2654-4502-8476-737a6675dc35"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.511435 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f0ddfc1d-2654-4502-8476-737a6675dc35" (UID: "f0ddfc1d-2654-4502-8476-737a6675dc35"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.523055 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-dhcp-agent-neutron-config-0") pod "f0ddfc1d-2654-4502-8476-737a6675dc35" (UID: "f0ddfc1d-2654-4502-8476-737a6675dc35"). InnerVolumeSpecName "neutron-dhcp-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.579536 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6tcm\" (UniqueName: \"kubernetes.io/projected/f0ddfc1d-2654-4502-8476-737a6675dc35-kube-api-access-g6tcm\") on node \"crc\" DevicePath \"\"" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.579573 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.579586 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.579595 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.579608 4940 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-neutron-dhcp-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.579616 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0ddfc1d-2654-4502-8476-737a6675dc35-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.786504 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" event={"ID":"f0ddfc1d-2654-4502-8476-737a6675dc35","Type":"ContainerDied","Data":"875eba93c55ef173040126e949ce03524e7a97d55e6ceca6899c79283e3dddf4"} Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.786543 4940 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="875eba93c55ef173040126e949ce03524e7a97d55e6ceca6899c79283e3dddf4" Nov 26 09:40:05 crc kubenswrapper[4940]: I1126 09:40:05.786602 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-s5g27" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.022192 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.023264 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="16d21285-da91-4a46-8443-7cbdfe13cd1c" containerName="nova-cell0-conductor-conductor" containerID="cri-o://7839800394ff273955e113f230a0c7c3891774792b193261806e9ef7465de682" gracePeriod=30 Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.528395 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.528872 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" containerName="nova-cell1-conductor-conductor" containerID="cri-o://b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08" gracePeriod=30 Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.692975 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"] Nov 26 09:40:35 crc kubenswrapper[4940]: E1126 09:40:35.693674 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0ddfc1d-2654-4502-8476-737a6675dc35" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.693698 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0ddfc1d-2654-4502-8476-737a6675dc35" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.694009 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0ddfc1d-2654-4502-8476-737a6675dc35" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.695108 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.700586 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-n27vj" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.701032 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.701339 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.701524 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.701683 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.702201 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.706268 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.712571 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"] Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.735129 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.735437 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f0b14554-e939-4a03-95a3-10517457591a" containerName="nova-scheduler-scheduler" containerID="cri-o://2ca50f6cda18fa9be1e2d835aaa30879b39f9dc7cbe5f1764500dd3ef907357f" gracePeriod=30 Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.763462 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.763729 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-log" containerID="cri-o://a2602e1a43f7c9a447a7d77ead2b8bda0ab0ab0f32d87126ff38e712bcc5cc84" gracePeriod=30 Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.764282 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-api" containerID="cri-o://87a17963a9354e55ecada023c358dba20d8b3c3d610fe454219ac91a85895250" gracePeriod=30 Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.803580 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.804196 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-log" containerID="cri-o://8cd4682e79902c32f98b0243d6629777b564b16dd50f6b2c5051a7d77c6c8f27" gracePeriod=30 Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.804895 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" 
podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-metadata" containerID="cri-o://61e408d219bb80c5fcd96efc5eb0ad4b7a9f4e6a5c5c4677e5d950b70dcf7198" gracePeriod=30 Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.822833 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.822882 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.822970 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.823006 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.823026 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.823058 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.823095 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " 
pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.823152 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xdjh\" (UniqueName: \"kubernetes.io/projected/4fd0ed71-a15f-4e19-a43f-2822fc14199a-kube-api-access-9xdjh\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.823171 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.823247 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.823269 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925264 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925330 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925355 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925376 4940 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925416 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925448 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xdjh\" (UniqueName: \"kubernetes.io/projected/4fd0ed71-a15f-4e19-a43f-2822fc14199a-kube-api-access-9xdjh\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925467 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925531 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925552 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925585 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.925600 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.927571 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.928805 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.932685 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.932863 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.932868 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.933633 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.933959 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.936468 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.936714 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.939146 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:35 crc kubenswrapper[4940]: I1126 09:40:35.957930 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xdjh\" (UniqueName: \"kubernetes.io/projected/4fd0ed71-a15f-4e19-a43f-2822fc14199a-kube-api-access-9xdjh\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.046940 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.163284 4940 generic.go:334] "Generic (PLEG): container finished" podID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerID="8cd4682e79902c32f98b0243d6629777b564b16dd50f6b2c5051a7d77c6c8f27" exitCode=143
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.163355 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cf79c6-c1be-4547-b294-f2f7361aa574","Type":"ContainerDied","Data":"8cd4682e79902c32f98b0243d6629777b564b16dd50f6b2c5051a7d77c6c8f27"}
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.165662 4940 generic.go:334] "Generic (PLEG): container finished" podID="912011e6-2802-415f-9e1b-39ddfec0f182" containerID="a2602e1a43f7c9a447a7d77ead2b8bda0ab0ab0f32d87126ff38e712bcc5cc84" exitCode=143
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.165704 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"912011e6-2802-415f-9e1b-39ddfec0f182","Type":"ContainerDied","Data":"a2602e1a43f7c9a447a7d77ead2b8bda0ab0ab0f32d87126ff38e712bcc5cc84"}
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.676887 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l"]
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.705825 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.848888 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2cf7\" (UniqueName: \"kubernetes.io/projected/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-kube-api-access-z2cf7\") pod \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") "
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.848971 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-config-data\") pod \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") "
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.850101 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-combined-ca-bundle\") pod \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\" (UID: \"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa\") "
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.859729 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-kube-api-access-z2cf7" (OuterVolumeSpecName: "kube-api-access-z2cf7") pod "e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" (UID: "e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa"). InnerVolumeSpecName "kube-api-access-z2cf7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.896335 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" (UID: "e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.907104 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-config-data" (OuterVolumeSpecName: "config-data") pod "e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" (UID: "e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.954077 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.954133 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:36 crc kubenswrapper[4940]: I1126 09:40:36.954194 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2cf7\" (UniqueName: \"kubernetes.io/projected/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa-kube-api-access-z2cf7\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.181115 4940 generic.go:334] "Generic (PLEG): container finished" podID="e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" containerID="b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08" exitCode=0
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.181228 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.183170 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa","Type":"ContainerDied","Data":"b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08"}
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.183209 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa","Type":"ContainerDied","Data":"af2753fe05110e0f319a53efbf8ec6e8ff19433ac569674601633541f3ffe9eb"}
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.183226 4940 scope.go:117] "RemoveContainer" containerID="b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.191604 4940 generic.go:334] "Generic (PLEG): container finished" podID="16d21285-da91-4a46-8443-7cbdfe13cd1c" containerID="7839800394ff273955e113f230a0c7c3891774792b193261806e9ef7465de682" exitCode=0
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.191669 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"16d21285-da91-4a46-8443-7cbdfe13cd1c","Type":"ContainerDied","Data":"7839800394ff273955e113f230a0c7c3891774792b193261806e9ef7465de682"}
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.193939 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" event={"ID":"4fd0ed71-a15f-4e19-a43f-2822fc14199a","Type":"ContainerStarted","Data":"d29f4248c74e3213e247ad7f71ddd813938ea01512ac6bc91f7983a428c834bc"}
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.195336 4940 generic.go:334] "Generic (PLEG): container finished" podID="f0b14554-e939-4a03-95a3-10517457591a" containerID="2ca50f6cda18fa9be1e2d835aaa30879b39f9dc7cbe5f1764500dd3ef907357f" exitCode=0
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.195385 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f0b14554-e939-4a03-95a3-10517457591a","Type":"ContainerDied","Data":"2ca50f6cda18fa9be1e2d835aaa30879b39f9dc7cbe5f1764500dd3ef907357f"}
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.195426 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f0b14554-e939-4a03-95a3-10517457591a","Type":"ContainerDied","Data":"9f021f15c6cab9c8930a67047d69ead1f35edc0a4658c811ffb8d806604e35bd"}
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.195447 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f021f15c6cab9c8930a67047d69ead1f35edc0a4658c811ffb8d806604e35bd"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.243763 4940 scope.go:117] "RemoveContainer" containerID="b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08"
Nov 26 09:40:37 crc kubenswrapper[4940]: E1126 09:40:37.244360 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08\": container with ID starting with b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08 not found: ID does not exist" containerID="b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.244409 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08"} err="failed to get container status \"b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08\": rpc error: code = NotFound desc = could not find container \"b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08\": container with ID starting with b9b7f33d23e74cf70fcf593dd55141793887e921a5b8434af24b61c445c2fa08 not found: ID does not exist"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.271948 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.297758 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.331255 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.372093 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 09:40:37 crc kubenswrapper[4940]: E1126 09:40:37.372611 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0b14554-e939-4a03-95a3-10517457591a" containerName="nova-scheduler-scheduler"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.372631 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0b14554-e939-4a03-95a3-10517457591a" containerName="nova-scheduler-scheduler"
Nov 26 09:40:37 crc kubenswrapper[4940]: E1126 09:40:37.372657 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" containerName="nova-cell1-conductor-conductor"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.372664 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" containerName="nova-cell1-conductor-conductor"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.372904 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" containerName="nova-cell1-conductor-conductor"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.372931 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0b14554-e939-4a03-95a3-10517457591a" containerName="nova-scheduler-scheduler"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.373689 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.375499 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.383632 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.465694 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-combined-ca-bundle\") pod \"f0b14554-e939-4a03-95a3-10517457591a\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") "
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.466341 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xzqg\" (UniqueName: \"kubernetes.io/projected/f0b14554-e939-4a03-95a3-10517457591a-kube-api-access-5xzqg\") pod \"f0b14554-e939-4a03-95a3-10517457591a\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") "
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.466447 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-config-data\") pod \"f0b14554-e939-4a03-95a3-10517457591a\" (UID: \"f0b14554-e939-4a03-95a3-10517457591a\") "
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.482255 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0b14554-e939-4a03-95a3-10517457591a-kube-api-access-5xzqg" (OuterVolumeSpecName: "kube-api-access-5xzqg") pod "f0b14554-e939-4a03-95a3-10517457591a" (UID: "f0b14554-e939-4a03-95a3-10517457591a"). InnerVolumeSpecName "kube-api-access-5xzqg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.508338 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-config-data" (OuterVolumeSpecName: "config-data") pod "f0b14554-e939-4a03-95a3-10517457591a" (UID: "f0b14554-e939-4a03-95a3-10517457591a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.511956 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f0b14554-e939-4a03-95a3-10517457591a" (UID: "f0b14554-e939-4a03-95a3-10517457591a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.569666 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9629092f-7cc7-4e81-94b2-8a021a314962-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.569714 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g88t\" (UniqueName: \"kubernetes.io/projected/9629092f-7cc7-4e81-94b2-8a021a314962-kube-api-access-4g88t\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.569857 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9629092f-7cc7-4e81-94b2-8a021a314962-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.569909 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xzqg\" (UniqueName: \"kubernetes.io/projected/f0b14554-e939-4a03-95a3-10517457591a-kube-api-access-5xzqg\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.569922 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.569931 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0b14554-e939-4a03-95a3-10517457591a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.614281 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.672201 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9629092f-7cc7-4e81-94b2-8a021a314962-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.672307 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9629092f-7cc7-4e81-94b2-8a021a314962-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.672332 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g88t\" (UniqueName: \"kubernetes.io/projected/9629092f-7cc7-4e81-94b2-8a021a314962-kube-api-access-4g88t\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.678567 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9629092f-7cc7-4e81-94b2-8a021a314962-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.679099 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9629092f-7cc7-4e81-94b2-8a021a314962-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.693625 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g88t\" (UniqueName: \"kubernetes.io/projected/9629092f-7cc7-4e81-94b2-8a021a314962-kube-api-access-4g88t\") pod \"nova-cell1-conductor-0\" (UID: \"9629092f-7cc7-4e81-94b2-8a021a314962\") " pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.696860 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.773806 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2m7wk\" (UniqueName: \"kubernetes.io/projected/16d21285-da91-4a46-8443-7cbdfe13cd1c-kube-api-access-2m7wk\") pod \"16d21285-da91-4a46-8443-7cbdfe13cd1c\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") "
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.774147 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-config-data\") pod \"16d21285-da91-4a46-8443-7cbdfe13cd1c\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") "
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.774304 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-combined-ca-bundle\") pod \"16d21285-da91-4a46-8443-7cbdfe13cd1c\" (UID: \"16d21285-da91-4a46-8443-7cbdfe13cd1c\") "
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.779506 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16d21285-da91-4a46-8443-7cbdfe13cd1c-kube-api-access-2m7wk" (OuterVolumeSpecName: "kube-api-access-2m7wk") pod "16d21285-da91-4a46-8443-7cbdfe13cd1c" (UID: "16d21285-da91-4a46-8443-7cbdfe13cd1c"). InnerVolumeSpecName "kube-api-access-2m7wk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.799211 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-config-data" (OuterVolumeSpecName: "config-data") pod "16d21285-da91-4a46-8443-7cbdfe13cd1c" (UID: "16d21285-da91-4a46-8443-7cbdfe13cd1c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.827556 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16d21285-da91-4a46-8443-7cbdfe13cd1c" (UID: "16d21285-da91-4a46-8443-7cbdfe13cd1c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.876643 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2m7wk\" (UniqueName: \"kubernetes.io/projected/16d21285-da91-4a46-8443-7cbdfe13cd1c-kube-api-access-2m7wk\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.876678 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:37 crc kubenswrapper[4940]: I1126 09:40:37.876688 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16d21285-da91-4a46-8443-7cbdfe13cd1c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.207336 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.211214 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" event={"ID":"4fd0ed71-a15f-4e19-a43f-2822fc14199a","Type":"ContainerStarted","Data":"7d923f4503e0859a27174a37d4e57bf97e6a067b1cdf1dbdf77c79741bfe56ed"}
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.216641 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.217103 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.217121 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"16d21285-da91-4a46-8443-7cbdfe13cd1c","Type":"ContainerDied","Data":"1c0bccd9ce223ee004a976d5578e54da821dca1960686f19e2cd39dc4c52d905"}
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.217390 4940 scope.go:117] "RemoveContainer" containerID="7839800394ff273955e113f230a0c7c3891774792b193261806e9ef7465de682"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.248198 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" podStartSLOduration=2.46358294 podStartE2EDuration="3.248182316s" podCreationTimestamp="2025-11-26 09:40:35 +0000 UTC" firstStartedPulling="2025-11-26 09:40:36.693102284 +0000 UTC m=+9938.213243903" lastFinishedPulling="2025-11-26 09:40:37.47770166 +0000 UTC m=+9938.997843279" observedRunningTime="2025-11-26 09:40:38.239944795 +0000 UTC m=+9939.760086414" watchObservedRunningTime="2025-11-26 09:40:38.248182316 +0000 UTC m=+9939.768323935"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.449461 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.465661 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.479551 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: E1126 09:40:38.480082 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16d21285-da91-4a46-8443-7cbdfe13cd1c" containerName="nova-cell0-conductor-conductor"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.480100 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="16d21285-da91-4a46-8443-7cbdfe13cd1c" containerName="nova-cell0-conductor-conductor"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.480311 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="16d21285-da91-4a46-8443-7cbdfe13cd1c" containerName="nova-cell0-conductor-conductor"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.483075 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.485281 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.489778 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcd5r\" (UniqueName: \"kubernetes.io/projected/6ab21b1c-967a-485b-aa85-b4027f59d859-kube-api-access-mcd5r\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.489961 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ab21b1c-967a-485b-aa85-b4027f59d859-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.490274 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ab21b1c-967a-485b-aa85-b4027f59d859-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.491937 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.504533 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.514407 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.523904 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.525594 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.527893 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.539502 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.591847 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ab21b1c-967a-485b-aa85-b4027f59d859-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.591913 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcd5r\" (UniqueName: \"kubernetes.io/projected/6ab21b1c-967a-485b-aa85-b4027f59d859-kube-api-access-mcd5r\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.591945 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ab21b1c-967a-485b-aa85-b4027f59d859-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.596434 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ab21b1c-967a-485b-aa85-b4027f59d859-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.601171 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ab21b1c-967a-485b-aa85-b4027f59d859-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.609543 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcd5r\" (UniqueName: \"kubernetes.io/projected/6ab21b1c-967a-485b-aa85-b4027f59d859-kube-api-access-mcd5r\") pod \"nova-cell0-conductor-0\" (UID: \"6ab21b1c-967a-485b-aa85-b4027f59d859\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.694311 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp2qq\" (UniqueName: \"kubernetes.io/projected/d97fb3be-c270-4527-a526-686ad8d6fec6-kube-api-access-gp2qq\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.694503 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d97fb3be-c270-4527-a526-686ad8d6fec6-config-data\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.694589 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d97fb3be-c270-4527-a526-686ad8d6fec6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.797401 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d97fb3be-c270-4527-a526-686ad8d6fec6-config-data\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.797490 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d97fb3be-c270-4527-a526-686ad8d6fec6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.797553 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp2qq\" (UniqueName: \"kubernetes.io/projected/d97fb3be-c270-4527-a526-686ad8d6fec6-kube-api-access-gp2qq\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.802437 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.805211 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d97fb3be-c270-4527-a526-686ad8d6fec6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.812569 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d97fb3be-c270-4527-a526-686ad8d6fec6-config-data\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.818438 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp2qq\" (UniqueName: \"kubernetes.io/projected/d97fb3be-c270-4527-a526-686ad8d6fec6-kube-api-access-gp2qq\") pod \"nova-scheduler-0\" (UID: \"d97fb3be-c270-4527-a526-686ad8d6fec6\") " pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.846506 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.948509 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.124:8775/\": read tcp 10.217.0.2:53784->10.217.1.124:8775: read: connection reset by peer"
Nov 26 09:40:38 crc kubenswrapper[4940]: I1126 09:40:38.948599 4940 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.124:8775/\": read tcp 10.217.0.2:53772->10.217.1.124:8775: read: connection reset by peer"
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.200400 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16d21285-da91-4a46-8443-7cbdfe13cd1c" path="/var/lib/kubelet/pods/16d21285-da91-4a46-8443-7cbdfe13cd1c/volumes"
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.201346 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa" path="/var/lib/kubelet/pods/e0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa/volumes"
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.202584 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0b14554-e939-4a03-95a3-10517457591a" path="/var/lib/kubelet/pods/f0b14554-e939-4a03-95a3-10517457591a/volumes"
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.237374 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"9629092f-7cc7-4e81-94b2-8a021a314962","Type":"ContainerStarted","Data":"5889a66d0be5178219b26b7c1d329adc8154f6463ad4d98a64ccd24c3efd483c"}
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.237414 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"9629092f-7cc7-4e81-94b2-8a021a314962","Type":"ContainerStarted","Data":"b0eb2c0101bfdb3b65be5c88da3ea983170b020a1c05c6280f52320342297d69"}
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.239185 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.242880 4940 generic.go:334] "Generic (PLEG): container finished" podID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerID="61e408d219bb80c5fcd96efc5eb0ad4b7a9f4e6a5c5c4677e5d950b70dcf7198" exitCode=0
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.242949 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cf79c6-c1be-4547-b294-f2f7361aa574","Type":"ContainerDied","Data":"61e408d219bb80c5fcd96efc5eb0ad4b7a9f4e6a5c5c4677e5d950b70dcf7198"}
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.257347 4940 generic.go:334] "Generic (PLEG): container finished" podID="912011e6-2802-415f-9e1b-39ddfec0f182" containerID="87a17963a9354e55ecada023c358dba20d8b3c3d610fe454219ac91a85895250" exitCode=0
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.257433 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"912011e6-2802-415f-9e1b-39ddfec0f182","Type":"ContainerDied","Data":"87a17963a9354e55ecada023c358dba20d8b3c3d610fe454219ac91a85895250"}
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.270400 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.270378784 podStartE2EDuration="2.270378784s" podCreationTimestamp="2025-11-26 09:40:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:40:39.255533982 +0000 UTC m=+9940.775675601" watchObservedRunningTime="2025-11-26 09:40:39.270378784 +0000 UTC m=+9940.790520403"
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.398945 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.497051 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.587099 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 09:40:39 crc kubenswrapper[4940]: W1126 09:40:39.608182 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd97fb3be_c270_4527_a526_686ad8d6fec6.slice/crio-7f3501af78ab4a4f6c4e383f1f69a3deb3839feb0da5fe8be2b509614d925f2a WatchSource:0}: Error finding container 7f3501af78ab4a4f6c4e383f1f69a3deb3839feb0da5fe8be2b509614d925f2a: Status 404 returned error can't find the container with id 7f3501af78ab4a4f6c4e383f1f69a3deb3839feb0da5fe8be2b509614d925f2a
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.618726 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-combined-ca-bundle\") pod \"08cf79c6-c1be-4547-b294-f2f7361aa574\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") "
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.618970 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqtjh\" (UniqueName: \"kubernetes.io/projected/08cf79c6-c1be-4547-b294-f2f7361aa574-kube-api-access-nqtjh\") pod \"08cf79c6-c1be-4547-b294-f2f7361aa574\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") "
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.619009 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-config-data\") pod \"08cf79c6-c1be-4547-b294-f2f7361aa574\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") "
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.619096 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cf79c6-c1be-4547-b294-f2f7361aa574-logs\") pod \"08cf79c6-c1be-4547-b294-f2f7361aa574\" (UID: \"08cf79c6-c1be-4547-b294-f2f7361aa574\") "
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.623271 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08cf79c6-c1be-4547-b294-f2f7361aa574-logs" (OuterVolumeSpecName: "logs") pod "08cf79c6-c1be-4547-b294-f2f7361aa574" (UID: "08cf79c6-c1be-4547-b294-f2f7361aa574"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.624998 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08cf79c6-c1be-4547-b294-f2f7361aa574-kube-api-access-nqtjh" (OuterVolumeSpecName: "kube-api-access-nqtjh") pod "08cf79c6-c1be-4547-b294-f2f7361aa574" (UID: "08cf79c6-c1be-4547-b294-f2f7361aa574"). InnerVolumeSpecName "kube-api-access-nqtjh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.646475 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.663990 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-config-data" (OuterVolumeSpecName: "config-data") pod "08cf79c6-c1be-4547-b294-f2f7361aa574" (UID: "08cf79c6-c1be-4547-b294-f2f7361aa574"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.714474 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08cf79c6-c1be-4547-b294-f2f7361aa574" (UID: "08cf79c6-c1be-4547-b294-f2f7361aa574"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.722007 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqtjh\" (UniqueName: \"kubernetes.io/projected/08cf79c6-c1be-4547-b294-f2f7361aa574-kube-api-access-nqtjh\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.722032 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.722068 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cf79c6-c1be-4547-b294-f2f7361aa574-logs\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.722077 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cf79c6-c1be-4547-b294-f2f7361aa574-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.823110 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-config-data\") pod \"912011e6-2802-415f-9e1b-39ddfec0f182\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") "
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.823223 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-combined-ca-bundle\") pod \"912011e6-2802-415f-9e1b-39ddfec0f182\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") "
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.823297 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912011e6-2802-415f-9e1b-39ddfec0f182-logs\") pod \"912011e6-2802-415f-9e1b-39ddfec0f182\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") "
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.823352 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7jjz\" (UniqueName: \"kubernetes.io/projected/912011e6-2802-415f-9e1b-39ddfec0f182-kube-api-access-v7jjz\") pod \"912011e6-2802-415f-9e1b-39ddfec0f182\" (UID: \"912011e6-2802-415f-9e1b-39ddfec0f182\") "
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.824505 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/912011e6-2802-415f-9e1b-39ddfec0f182-logs" (OuterVolumeSpecName: "logs") pod "912011e6-2802-415f-9e1b-39ddfec0f182" (UID: "912011e6-2802-415f-9e1b-39ddfec0f182"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.826222 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/912011e6-2802-415f-9e1b-39ddfec0f182-kube-api-access-v7jjz" (OuterVolumeSpecName: "kube-api-access-v7jjz") pod "912011e6-2802-415f-9e1b-39ddfec0f182" (UID: "912011e6-2802-415f-9e1b-39ddfec0f182"). InnerVolumeSpecName "kube-api-access-v7jjz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.848088 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-config-data" (OuterVolumeSpecName: "config-data") pod "912011e6-2802-415f-9e1b-39ddfec0f182" (UID: "912011e6-2802-415f-9e1b-39ddfec0f182"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.851195 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "912011e6-2802-415f-9e1b-39ddfec0f182" (UID: "912011e6-2802-415f-9e1b-39ddfec0f182"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.925944 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.925973 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/912011e6-2802-415f-9e1b-39ddfec0f182-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.925986 4940 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/912011e6-2802-415f-9e1b-39ddfec0f182-logs\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:39 crc kubenswrapper[4940]: I1126 09:40:39.925996 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7jjz\" (UniqueName: \"kubernetes.io/projected/912011e6-2802-415f-9e1b-39ddfec0f182-kube-api-access-v7jjz\") on node \"crc\" DevicePath \"\""
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.274535 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"912011e6-2802-415f-9e1b-39ddfec0f182","Type":"ContainerDied","Data":"e118e8633cadda7a0b817fd66efc94c3229710c3948d7433ca222091284e85b2"}
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.274597 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.276449 4940 scope.go:117] "RemoveContainer" containerID="87a17963a9354e55ecada023c358dba20d8b3c3d610fe454219ac91a85895250"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.276478 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.276692 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ab21b1c-967a-485b-aa85-b4027f59d859","Type":"ContainerStarted","Data":"066d0735f8e18e7580df79c866bd90b2ce1b765f71876033b562aabe58759db9"}
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.276815 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ab21b1c-967a-485b-aa85-b4027f59d859","Type":"ContainerStarted","Data":"0ddc770dcbcdbd973263a570612a36329daac92c6e4ba18513d257a395cb667e"}
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.278646 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.278645 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cf79c6-c1be-4547-b294-f2f7361aa574","Type":"ContainerDied","Data":"ce8478f22df6095a887da974c1f748534dd598159d2e97d96a668d3a9c7f25e4"}
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.282140 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d97fb3be-c270-4527-a526-686ad8d6fec6","Type":"ContainerStarted","Data":"7bbf3f13b1db366669cbd9b7710e9f4a13eb889b526e03866bf742e9f3cb10cf"}
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.282201 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d97fb3be-c270-4527-a526-686ad8d6fec6","Type":"ContainerStarted","Data":"7f3501af78ab4a4f6c4e383f1f69a3deb3839feb0da5fe8be2b509614d925f2a"}
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.318075 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.318052471 podStartE2EDuration="2.318052471s" podCreationTimestamp="2025-11-26 09:40:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:40:40.301509635 +0000 UTC m=+9941.821651294" watchObservedRunningTime="2025-11-26 09:40:40.318052471 +0000 UTC m=+9941.838194090"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.321993 4940 scope.go:117] "RemoveContainer" containerID="a2602e1a43f7c9a447a7d77ead2b8bda0ab0ab0f32d87126ff38e712bcc5cc84"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.360124 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.368898 4940 scope.go:117] "RemoveContainer" containerID="61e408d219bb80c5fcd96efc5eb0ad4b7a9f4e6a5c5c4677e5d950b70dcf7198"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.386467 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389134 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 26 09:40:40 crc kubenswrapper[4940]: E1126 09:40:40.389632 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-api"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389648 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-api"
Nov 26 09:40:40 crc kubenswrapper[4940]: E1126 09:40:40.389669 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-log"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389676 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-log"
Nov 26 09:40:40 crc kubenswrapper[4940]: E1126 09:40:40.389720 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-log"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389729 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-log"
Nov 26 09:40:40 crc kubenswrapper[4940]: E1126 09:40:40.389740 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-metadata"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389748 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-metadata"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389931 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-api"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389949 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-metadata"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389972 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" containerName="nova-api-log"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.389984 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" containerName="nova-metadata-log"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.391112 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.396507 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.403173 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.4031493360000002 podStartE2EDuration="2.403149336s" podCreationTimestamp="2025-11-26 09:40:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:40:40.351287677 +0000 UTC m=+9941.871429296" watchObservedRunningTime="2025-11-26 09:40:40.403149336 +0000 UTC m=+9941.923290955"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.420996 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.452597 4940 scope.go:117] "RemoveContainer" containerID="8cd4682e79902c32f98b0243d6629777b564b16dd50f6b2c5051a7d77c6c8f27"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.452738 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.476210 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.529560 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.531389 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.533522 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.539295 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csj7c\" (UniqueName: \"kubernetes.io/projected/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-kube-api-access-csj7c\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.539681 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.541288 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.541424 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-logs\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.541546 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-config-data\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.643903 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csj7c\" (UniqueName: \"kubernetes.io/projected/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-kube-api-access-csj7c\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.644144 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.644191 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mqlc\" (UniqueName: \"kubernetes.io/projected/785e01eb-e9db-4e60-8cd5-cfeed89c4865-kube-api-access-8mqlc\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.644300 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-logs\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.644427 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785e01eb-e9db-4e60-8cd5-cfeed89c4865-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.645737 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-config-data\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.645823 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/785e01eb-e9db-4e60-8cd5-cfeed89c4865-config-data\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.645910 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/785e01eb-e9db-4e60-8cd5-cfeed89c4865-logs\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.645751 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-logs\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.663458 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.663670 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-config-data\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.668599 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csj7c\" (UniqueName: \"kubernetes.io/projected/f01319e0-009a-4b1e-b8d5-d76d5d180ab4-kube-api-access-csj7c\") pod \"nova-api-0\" (UID: \"f01319e0-009a-4b1e-b8d5-d76d5d180ab4\") " pod="openstack/nova-api-0"
Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.735191 4940 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-api-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.748374 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mqlc\" (UniqueName: \"kubernetes.io/projected/785e01eb-e9db-4e60-8cd5-cfeed89c4865-kube-api-access-8mqlc\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.748578 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785e01eb-e9db-4e60-8cd5-cfeed89c4865-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.748609 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/785e01eb-e9db-4e60-8cd5-cfeed89c4865-config-data\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.748641 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/785e01eb-e9db-4e60-8cd5-cfeed89c4865-logs\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.749085 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/785e01eb-e9db-4e60-8cd5-cfeed89c4865-logs\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.753574 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785e01eb-e9db-4e60-8cd5-cfeed89c4865-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.760240 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/785e01eb-e9db-4e60-8cd5-cfeed89c4865-config-data\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.765304 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mqlc\" (UniqueName: \"kubernetes.io/projected/785e01eb-e9db-4e60-8cd5-cfeed89c4865-kube-api-access-8mqlc\") pod \"nova-metadata-0\" (UID: \"785e01eb-e9db-4e60-8cd5-cfeed89c4865\") " pod="openstack/nova-metadata-0" Nov 26 09:40:40 crc kubenswrapper[4940]: I1126 09:40:40.855401 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 09:40:41 crc kubenswrapper[4940]: I1126 09:40:41.204106 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08cf79c6-c1be-4547-b294-f2f7361aa574" path="/var/lib/kubelet/pods/08cf79c6-c1be-4547-b294-f2f7361aa574/volumes" Nov 26 09:40:41 crc kubenswrapper[4940]: I1126 09:40:41.205655 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="912011e6-2802-415f-9e1b-39ddfec0f182" path="/var/lib/kubelet/pods/912011e6-2802-415f-9e1b-39ddfec0f182/volumes" Nov 26 09:40:41 crc kubenswrapper[4940]: I1126 09:40:41.388979 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 09:40:41 crc kubenswrapper[4940]: W1126 09:40:41.595756 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod785e01eb_e9db_4e60_8cd5_cfeed89c4865.slice/crio-d99d7a674dad57a6d021899778992c442f6056bc783d0852cbb74b2ff28d41cd WatchSource:0}: Error finding container d99d7a674dad57a6d021899778992c442f6056bc783d0852cbb74b2ff28d41cd: Status 404 returned error can't find the container with id d99d7a674dad57a6d021899778992c442f6056bc783d0852cbb74b2ff28d41cd Nov 26 09:40:41 crc kubenswrapper[4940]: I1126 09:40:41.603725 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 09:40:42 crc kubenswrapper[4940]: I1126 09:40:42.311700 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"785e01eb-e9db-4e60-8cd5-cfeed89c4865","Type":"ContainerStarted","Data":"d81d6d03f844f5ebb8786a2558e8bcd3f47a7cea664aa988f6d39afe6911d829"} Nov 26 09:40:42 crc kubenswrapper[4940]: I1126 09:40:42.311972 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"785e01eb-e9db-4e60-8cd5-cfeed89c4865","Type":"ContainerStarted","Data":"cee077a74c9da2fc6bc2cea771f172822f33be1f2ee5ca8c52212170aba10cc6"} Nov 26 09:40:42 crc kubenswrapper[4940]: I1126 09:40:42.311982 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"785e01eb-e9db-4e60-8cd5-cfeed89c4865","Type":"ContainerStarted","Data":"d99d7a674dad57a6d021899778992c442f6056bc783d0852cbb74b2ff28d41cd"} Nov 26 09:40:42 crc kubenswrapper[4940]: I1126 09:40:42.314766 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f01319e0-009a-4b1e-b8d5-d76d5d180ab4","Type":"ContainerStarted","Data":"bb322898540964e2d1a91395f919ee48f085afa1e3e5a6aa79ec2cfd7cf597f2"} Nov 26 09:40:42 crc kubenswrapper[4940]: I1126 09:40:42.314791 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f01319e0-009a-4b1e-b8d5-d76d5d180ab4","Type":"ContainerStarted","Data":"8b1400d630ab67e621932ab979db1845b27604817d2a00c081548e750413e7a4"} Nov 26 09:40:42 crc kubenswrapper[4940]: I1126 09:40:42.314801 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f01319e0-009a-4b1e-b8d5-d76d5d180ab4","Type":"ContainerStarted","Data":"49ce9248b825a82fbada818491023197b7ec7a4cc9affa50273411ecbb38a364"} Nov 26 09:40:42 crc kubenswrapper[4940]: I1126 09:40:42.341577 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.341561091 podStartE2EDuration="2.341561091s" podCreationTimestamp="2025-11-26 09:40:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:40:42.338232695 +0000 UTC m=+9943.858374314" watchObservedRunningTime="2025-11-26 09:40:42.341561091 +0000 UTC m=+9943.861702710" Nov 26 09:40:43 crc kubenswrapper[4940]: I1126 09:40:43.847188 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 09:40:45 crc kubenswrapper[4940]: I1126 09:40:45.856321 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 09:40:45 crc kubenswrapper[4940]: I1126 09:40:45.856620 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 09:40:47 crc kubenswrapper[4940]: I1126 09:40:47.753197 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 26 09:40:47 crc kubenswrapper[4940]: I1126 09:40:47.796203 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=7.796170118 podStartE2EDuration="7.796170118s" podCreationTimestamp="2025-11-26 09:40:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:40:42.373412814 +0000 UTC m=+9943.893554433" watchObservedRunningTime="2025-11-26 09:40:47.796170118 +0000 UTC m=+9949.316311767" Nov 26 09:40:48 crc kubenswrapper[4940]: I1126 09:40:48.847709 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 09:40:48 crc kubenswrapper[4940]: I1126 09:40:48.856892 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 26 09:40:48 crc kubenswrapper[4940]: I1126 09:40:48.922388 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 09:40:49 crc kubenswrapper[4940]: I1126 09:40:49.493728 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 09:40:50 crc kubenswrapper[4940]: I1126 09:40:50.735788 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 09:40:50 crc kubenswrapper[4940]: I1126 09:40:50.735850 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 09:40:50 crc kubenswrapper[4940]: I1126 09:40:50.856291 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 09:40:50 crc kubenswrapper[4940]: I1126 09:40:50.856355 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 09:40:51 crc kubenswrapper[4940]: I1126 09:40:51.818242 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f01319e0-009a-4b1e-b8d5-d76d5d180ab4" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.231:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 09:40:51 crc kubenswrapper[4940]: I1126 09:40:51.818252 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f01319e0-009a-4b1e-b8d5-d76d5d180ab4" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.231:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 09:40:51 crc 
kubenswrapper[4940]: I1126 09:40:51.939296 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="785e01eb-e9db-4e60-8cd5-cfeed89c4865" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.232:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 09:40:51 crc kubenswrapper[4940]: I1126 09:40:51.939602 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="785e01eb-e9db-4e60-8cd5-cfeed89c4865" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.232:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.742207 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.742856 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.743479 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.743494 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.747506 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.748770 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.865296 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.866193 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 09:41:00 crc kubenswrapper[4940]: I1126 09:41:00.875123 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 09:41:01 crc kubenswrapper[4940]: I1126 09:41:01.619558 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 09:41:07 crc kubenswrapper[4940]: I1126 09:41:07.222305 4940 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pode0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pode0d99baa-f0ab-4c1e-b0c6-9c6ef8ebd4fa] : Timed out while waiting for systemd to remove kubepods-besteffort-pode0d99baa_f0ab_4c1e_b0c6_9c6ef8ebd4fa.slice" Nov 26 09:41:17 crc kubenswrapper[4940]: I1126 09:41:17.468703 4940 scope.go:117] "RemoveContainer" containerID="2ca50f6cda18fa9be1e2d835aaa30879b39f9dc7cbe5f1764500dd3ef907357f" Nov 26 09:41:21 crc kubenswrapper[4940]: I1126 09:41:21.728505 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:41:21 crc kubenswrapper[4940]: I1126 09:41:21.728847 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:41:51 crc kubenswrapper[4940]: I1126 09:41:51.728527 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:41:51 crc kubenswrapper[4940]: I1126 09:41:51.729059 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.505665 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qxlkh"] Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.509192 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.530638 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qxlkh"] Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.622822 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-utilities\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.623829 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p96v4\" (UniqueName: \"kubernetes.io/projected/1c726790-fab9-4095-87e0-f011c2367329-kube-api-access-p96v4\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.624025 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-catalog-content\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.726199 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-catalog-content\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.726346 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-utilities\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc 
kubenswrapper[4940]: I1126 09:41:53.726523 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p96v4\" (UniqueName: \"kubernetes.io/projected/1c726790-fab9-4095-87e0-f011c2367329-kube-api-access-p96v4\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.726992 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-catalog-content\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.727028 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-utilities\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.746774 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p96v4\" (UniqueName: \"kubernetes.io/projected/1c726790-fab9-4095-87e0-f011c2367329-kube-api-access-p96v4\") pod \"community-operators-qxlkh\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:53 crc kubenswrapper[4940]: I1126 09:41:53.845074 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:41:54 crc kubenswrapper[4940]: I1126 09:41:54.465624 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qxlkh"] Nov 26 09:41:55 crc kubenswrapper[4940]: I1126 09:41:55.335443 4940 generic.go:334] "Generic (PLEG): container finished" podID="1c726790-fab9-4095-87e0-f011c2367329" containerID="d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03" exitCode=0 Nov 26 09:41:55 crc kubenswrapper[4940]: I1126 09:41:55.335539 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxlkh" event={"ID":"1c726790-fab9-4095-87e0-f011c2367329","Type":"ContainerDied","Data":"d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03"} Nov 26 09:41:55 crc kubenswrapper[4940]: I1126 09:41:55.345502 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxlkh" event={"ID":"1c726790-fab9-4095-87e0-f011c2367329","Type":"ContainerStarted","Data":"c4750d83163c300e0a741ffd1bb31db81c161e8e0d7f8458d331950946e8efa2"} Nov 26 09:41:55 crc kubenswrapper[4940]: I1126 09:41:55.350114 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:41:56 crc kubenswrapper[4940]: I1126 09:41:56.349714 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxlkh" event={"ID":"1c726790-fab9-4095-87e0-f011c2367329","Type":"ContainerStarted","Data":"f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865"} Nov 26 09:41:58 crc kubenswrapper[4940]: I1126 09:41:58.379888 4940 generic.go:334] "Generic (PLEG): container finished" podID="1c726790-fab9-4095-87e0-f011c2367329" 
containerID="f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865" exitCode=0 Nov 26 09:41:58 crc kubenswrapper[4940]: I1126 09:41:58.379990 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxlkh" event={"ID":"1c726790-fab9-4095-87e0-f011c2367329","Type":"ContainerDied","Data":"f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865"} Nov 26 09:42:00 crc kubenswrapper[4940]: I1126 09:42:00.425286 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxlkh" event={"ID":"1c726790-fab9-4095-87e0-f011c2367329","Type":"ContainerStarted","Data":"2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786"} Nov 26 09:42:00 crc kubenswrapper[4940]: I1126 09:42:00.454127 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qxlkh" podStartSLOduration=3.4238562249999998 podStartE2EDuration="7.454102533s" podCreationTimestamp="2025-11-26 09:41:53 +0000 UTC" firstStartedPulling="2025-11-26 09:41:55.349678305 +0000 UTC m=+10016.869819944" lastFinishedPulling="2025-11-26 09:41:59.379924623 +0000 UTC m=+10020.900066252" observedRunningTime="2025-11-26 09:42:00.449850027 +0000 UTC m=+10021.969991686" watchObservedRunningTime="2025-11-26 09:42:00.454102533 +0000 UTC m=+10021.974244192" Nov 26 09:42:03 crc kubenswrapper[4940]: I1126 09:42:03.845927 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:42:03 crc kubenswrapper[4940]: I1126 09:42:03.846535 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:42:04 crc kubenswrapper[4940]: I1126 09:42:04.896457 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-qxlkh" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="registry-server" probeResult="failure" output=< Nov 26 09:42:04 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 09:42:04 crc kubenswrapper[4940]: > Nov 26 09:42:13 crc kubenswrapper[4940]: I1126 09:42:13.914576 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:42:13 crc kubenswrapper[4940]: I1126 09:42:13.976802 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:42:14 crc kubenswrapper[4940]: I1126 09:42:14.174196 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qxlkh"] Nov 26 09:42:15 crc kubenswrapper[4940]: I1126 09:42:15.642607 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qxlkh" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="registry-server" containerID="cri-o://2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786" gracePeriod=2 Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.202265 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.323624 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-catalog-content\") pod \"1c726790-fab9-4095-87e0-f011c2367329\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.323986 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p96v4\" (UniqueName: \"kubernetes.io/projected/1c726790-fab9-4095-87e0-f011c2367329-kube-api-access-p96v4\") pod \"1c726790-fab9-4095-87e0-f011c2367329\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.324163 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-utilities\") pod \"1c726790-fab9-4095-87e0-f011c2367329\" (UID: \"1c726790-fab9-4095-87e0-f011c2367329\") " Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.324765 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-utilities" (OuterVolumeSpecName: "utilities") pod "1c726790-fab9-4095-87e0-f011c2367329" (UID: "1c726790-fab9-4095-87e0-f011c2367329"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.330515 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c726790-fab9-4095-87e0-f011c2367329-kube-api-access-p96v4" (OuterVolumeSpecName: "kube-api-access-p96v4") pod "1c726790-fab9-4095-87e0-f011c2367329" (UID: "1c726790-fab9-4095-87e0-f011c2367329"). InnerVolumeSpecName "kube-api-access-p96v4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.390733 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c726790-fab9-4095-87e0-f011c2367329" (UID: "1c726790-fab9-4095-87e0-f011c2367329"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.427479 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.427530 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p96v4\" (UniqueName: \"kubernetes.io/projected/1c726790-fab9-4095-87e0-f011c2367329-kube-api-access-p96v4\") on node \"crc\" DevicePath \"\"" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.427544 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c726790-fab9-4095-87e0-f011c2367329-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.661761 4940 generic.go:334] "Generic (PLEG): container finished" podID="1c726790-fab9-4095-87e0-f011c2367329" containerID="2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786" exitCode=0 Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.661812 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxlkh" event={"ID":"1c726790-fab9-4095-87e0-f011c2367329","Type":"ContainerDied","Data":"2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786"} Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.661843 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxlkh" event={"ID":"1c726790-fab9-4095-87e0-f011c2367329","Type":"ContainerDied","Data":"c4750d83163c300e0a741ffd1bb31db81c161e8e0d7f8458d331950946e8efa2"} Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.661869 4940 scope.go:117] "RemoveContainer" containerID="2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.662072 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qxlkh" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.709719 4940 scope.go:117] "RemoveContainer" containerID="f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.726741 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qxlkh"] Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.735846 4940 scope.go:117] "RemoveContainer" containerID="d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.740695 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qxlkh"] Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.791651 4940 scope.go:117] "RemoveContainer" containerID="2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786" Nov 26 09:42:16 crc kubenswrapper[4940]: E1126 09:42:16.792059 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786\": container with ID starting with 2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786 not found: ID does not exist" containerID="2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.792104 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786"} err="failed to get container status \"2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786\": rpc error: code = NotFound desc = could not find container \"2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786\": container with ID starting with 2f969c41831709e9f589af17553eec31c565844ca056a7f9a0756f322cb9d786 not found: ID does not exist" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.792133 4940 scope.go:117] "RemoveContainer" containerID="f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865" Nov 26 09:42:16 crc kubenswrapper[4940]: E1126 09:42:16.792476 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865\": container with ID starting with f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865 not found: ID does not exist" containerID="f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.792504 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865"} err="failed to get container status \"f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865\": rpc error: code = NotFound desc = could not find container \"f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865\": container with ID starting with f93aff08178de328e6cfb4314900a678ffe32793fe4fc59197e1ae23531a4865 not found: ID does not exist" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.792523 4940 scope.go:117] "RemoveContainer" containerID="d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03" Nov 26 09:42:16 crc kubenswrapper[4940]: E1126 09:42:16.792771 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03\": container with ID starting with d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03 not found: ID does not exist" containerID="d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03" Nov 26 09:42:16 crc kubenswrapper[4940]: I1126 09:42:16.792800 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03"} err="failed to get container status \"d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03\": rpc error: code = NotFound desc = could not find container \"d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03\": container with ID starting with d9677625200af5ee56fdd74cfec45da30413c8909ae00e7fc74b736f840e7c03 not found: ID does not exist" Nov 26 09:42:17 crc kubenswrapper[4940]: I1126 09:42:17.185419 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c726790-fab9-4095-87e0-f011c2367329" path="/var/lib/kubelet/pods/1c726790-fab9-4095-87e0-f011c2367329/volumes" Nov 26 09:42:21 crc kubenswrapper[4940]: I1126 09:42:21.728197 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:42:21 crc kubenswrapper[4940]: I1126 09:42:21.728833 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:42:21 crc kubenswrapper[4940]: I1126 09:42:21.728891 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 09:42:21 crc kubenswrapper[4940]: I1126 09:42:21.730495 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"47d5d2db4ba968e85e34ad466f92d109f867faaa638ca2b2990de6899520ee90"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 09:42:21 crc kubenswrapper[4940]: I1126 09:42:21.730589 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://47d5d2db4ba968e85e34ad466f92d109f867faaa638ca2b2990de6899520ee90" gracePeriod=600 Nov 26 09:42:22 crc kubenswrapper[4940]: I1126 09:42:22.746905 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="47d5d2db4ba968e85e34ad466f92d109f867faaa638ca2b2990de6899520ee90" exitCode=0 Nov 26 09:42:22 crc kubenswrapper[4940]: I1126 09:42:22.747316 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"47d5d2db4ba968e85e34ad466f92d109f867faaa638ca2b2990de6899520ee90"} Nov 26 09:42:22 crc kubenswrapper[4940]: I1126 09:42:22.748883 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba"} Nov 26 09:42:22 crc kubenswrapper[4940]: I1126 09:42:22.748929 4940 scope.go:117] "RemoveContainer" containerID="217b982bbe955f2d123759b5980428c0834fad2ecf4ee8dfb960813d64b98b4e" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.503971 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xs8r5"] Nov 26 09:43:19 crc kubenswrapper[4940]: E1126 09:43:19.505742 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="registry-server" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.505820 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="registry-server" Nov 26 09:43:19 crc kubenswrapper[4940]: E1126 09:43:19.505888 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="extract-utilities" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.505946 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="extract-utilities" Nov 26 09:43:19 crc kubenswrapper[4940]: E1126 09:43:19.506016 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="extract-content" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.506087 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="extract-content" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.506320 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c726790-fab9-4095-87e0-f011c2367329" containerName="registry-server" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.507894 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.525759 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xs8r5"] Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.675101 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xfhs\" (UniqueName: \"kubernetes.io/projected/158b5416-6a23-49ad-8156-af9ec2cf638a-kube-api-access-5xfhs\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.675157 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-catalog-content\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.675718 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-utilities\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.777551 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xfhs\" (UniqueName: \"kubernetes.io/projected/158b5416-6a23-49ad-8156-af9ec2cf638a-kube-api-access-5xfhs\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.777618 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-catalog-content\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.777707 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-utilities\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.778410 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-utilities\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.779155 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-catalog-content\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.804839 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5xfhs\" (UniqueName: \"kubernetes.io/projected/158b5416-6a23-49ad-8156-af9ec2cf638a-kube-api-access-5xfhs\") pod \"redhat-operators-xs8r5\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:19 crc kubenswrapper[4940]: I1126 09:43:19.828575 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:20 crc kubenswrapper[4940]: I1126 09:43:20.377950 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xs8r5"] Nov 26 09:43:20 crc kubenswrapper[4940]: I1126 09:43:20.575934 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xs8r5" event={"ID":"158b5416-6a23-49ad-8156-af9ec2cf638a","Type":"ContainerStarted","Data":"fbc15f60798a9ebb6ef86055b9d5581721d025a4973d388d862c9c433a69915a"} Nov 26 09:43:21 crc kubenswrapper[4940]: I1126 09:43:21.591935 4940 generic.go:334] "Generic (PLEG): container finished" podID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerID="e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6" exitCode=0 Nov 26 09:43:21 crc kubenswrapper[4940]: I1126 09:43:21.592010 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xs8r5" event={"ID":"158b5416-6a23-49ad-8156-af9ec2cf638a","Type":"ContainerDied","Data":"e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6"} Nov 26 09:43:23 crc kubenswrapper[4940]: I1126 09:43:23.622033 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xs8r5" event={"ID":"158b5416-6a23-49ad-8156-af9ec2cf638a","Type":"ContainerStarted","Data":"b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3"} Nov 26 09:43:25 crc kubenswrapper[4940]: E1126 09:43:25.659476 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod158b5416_6a23_49ad_8156_af9ec2cf638a.slice/crio-b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3.scope\": RecentStats: unable to find data in memory cache]" Nov 26 09:43:25 crc kubenswrapper[4940]: I1126 09:43:25.664352 4940 generic.go:334] "Generic (PLEG): container finished" podID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerID="b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3" exitCode=0 Nov 26 09:43:25 crc kubenswrapper[4940]: I1126 09:43:25.664483 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xs8r5" event={"ID":"158b5416-6a23-49ad-8156-af9ec2cf638a","Type":"ContainerDied","Data":"b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3"} Nov 26 09:43:26 crc kubenswrapper[4940]: I1126 09:43:26.678538 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xs8r5" event={"ID":"158b5416-6a23-49ad-8156-af9ec2cf638a","Type":"ContainerStarted","Data":"24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803"} Nov 26 09:43:26 crc kubenswrapper[4940]: I1126 09:43:26.705497 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xs8r5" podStartSLOduration=2.919878933 podStartE2EDuration="7.705480608s" podCreationTimestamp="2025-11-26 09:43:19 +0000 UTC" firstStartedPulling="2025-11-26 09:43:21.595929347 +0000 UTC m=+10103.116070976" 
lastFinishedPulling="2025-11-26 09:43:26.381530992 +0000 UTC m=+10107.901672651" observedRunningTime="2025-11-26 09:43:26.699343323 +0000 UTC m=+10108.219484942" watchObservedRunningTime="2025-11-26 09:43:26.705480608 +0000 UTC m=+10108.225622227" Nov 26 09:43:29 crc kubenswrapper[4940]: I1126 09:43:29.829731 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:29 crc kubenswrapper[4940]: I1126 09:43:29.830227 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:30 crc kubenswrapper[4940]: I1126 09:43:30.901816 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xs8r5" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="registry-server" probeResult="failure" output=< Nov 26 09:43:30 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 09:43:30 crc kubenswrapper[4940]: > Nov 26 09:43:39 crc kubenswrapper[4940]: I1126 09:43:39.882948 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:39 crc kubenswrapper[4940]: I1126 09:43:39.930465 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:40 crc kubenswrapper[4940]: I1126 09:43:40.127465 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xs8r5"] Nov 26 09:43:41 crc kubenswrapper[4940]: I1126 09:43:41.876939 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xs8r5" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="registry-server" containerID="cri-o://24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803" gracePeriod=2 Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.402811 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.510621 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-catalog-content\") pod \"158b5416-6a23-49ad-8156-af9ec2cf638a\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.510788 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xfhs\" (UniqueName: \"kubernetes.io/projected/158b5416-6a23-49ad-8156-af9ec2cf638a-kube-api-access-5xfhs\") pod \"158b5416-6a23-49ad-8156-af9ec2cf638a\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.510832 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-utilities\") pod \"158b5416-6a23-49ad-8156-af9ec2cf638a\" (UID: \"158b5416-6a23-49ad-8156-af9ec2cf638a\") " Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.512451 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-utilities" (OuterVolumeSpecName: "utilities") pod "158b5416-6a23-49ad-8156-af9ec2cf638a" (UID: "158b5416-6a23-49ad-8156-af9ec2cf638a"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.515430 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/158b5416-6a23-49ad-8156-af9ec2cf638a-kube-api-access-5xfhs" (OuterVolumeSpecName: "kube-api-access-5xfhs") pod "158b5416-6a23-49ad-8156-af9ec2cf638a" (UID: "158b5416-6a23-49ad-8156-af9ec2cf638a"). InnerVolumeSpecName "kube-api-access-5xfhs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.601459 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "158b5416-6a23-49ad-8156-af9ec2cf638a" (UID: "158b5416-6a23-49ad-8156-af9ec2cf638a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.613319 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.613350 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xfhs\" (UniqueName: \"kubernetes.io/projected/158b5416-6a23-49ad-8156-af9ec2cf638a-kube-api-access-5xfhs\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.613362 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158b5416-6a23-49ad-8156-af9ec2cf638a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.892122 4940 generic.go:334] "Generic (PLEG): container finished" podID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerID="24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803" exitCode=0 Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.892196 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xs8r5" event={"ID":"158b5416-6a23-49ad-8156-af9ec2cf638a","Type":"ContainerDied","Data":"24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803"} Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.892231 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xs8r5" event={"ID":"158b5416-6a23-49ad-8156-af9ec2cf638a","Type":"ContainerDied","Data":"fbc15f60798a9ebb6ef86055b9d5581721d025a4973d388d862c9c433a69915a"} Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.892252 4940 scope.go:117] "RemoveContainer" containerID="24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.892274 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xs8r5" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.933624 4940 scope.go:117] "RemoveContainer" containerID="b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3" Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.960775 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xs8r5"] Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.974526 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xs8r5"] Nov 26 09:43:42 crc kubenswrapper[4940]: I1126 09:43:42.986028 4940 scope.go:117] "RemoveContainer" containerID="e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6" Nov 26 09:43:43 crc kubenswrapper[4940]: I1126 09:43:43.014878 4940 scope.go:117] "RemoveContainer" containerID="24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803" Nov 26 09:43:43 crc kubenswrapper[4940]: E1126 09:43:43.015397 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803\": container with ID starting with 24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803 not found: ID does not exist" containerID="24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803" Nov 26 09:43:43 crc kubenswrapper[4940]: I1126 09:43:43.015439 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803"} err="failed to get container status \"24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803\": rpc error: code = NotFound desc = could not find container \"24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803\": container with ID starting with 24836d37554e353094987f213b2064fa492e6cb2bb3f9fb5f7f1faa7f04b2803 not found: ID does not exist" Nov 26 09:43:43 crc kubenswrapper[4940]: I1126 09:43:43.015466 4940 scope.go:117] "RemoveContainer" containerID="b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3" Nov 26 09:43:43 crc kubenswrapper[4940]: E1126 09:43:43.015854 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3\": container with ID starting with b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3 not found: ID does not exist" containerID="b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3" Nov 26 09:43:43 crc kubenswrapper[4940]: I1126 09:43:43.015890 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3"} err="failed to get container status \"b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3\": rpc error: code = NotFound desc = could not find container \"b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3\": container with ID starting with b708fda49c6ebcb746108a9801c1b9d88c01386a616e12ae9b7e036101a28be3 not found: ID does not exist" Nov 26 09:43:43 crc kubenswrapper[4940]: I1126 09:43:43.015910 4940 scope.go:117] "RemoveContainer" containerID="e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6" Nov 26 09:43:43 crc kubenswrapper[4940]: E1126 09:43:43.016165 4940 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6\": container with ID starting with e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6 not found: ID does not exist" containerID="e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6" Nov 26 09:43:43 crc kubenswrapper[4940]: I1126 09:43:43.016197 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6"} err="failed to get container status \"e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6\": rpc error: code = NotFound desc = could not find container \"e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6\": container with ID starting with e9a37af0f79f4cde1664b9170a9bc3588c91c5d3e378b41e0466e58ff3b9f2a6 not found: ID does not exist" Nov 26 09:43:43 crc kubenswrapper[4940]: I1126 09:43:43.213863 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" path="/var/lib/kubelet/pods/158b5416-6a23-49ad-8156-af9ec2cf638a/volumes" Nov 26 09:43:47 crc kubenswrapper[4940]: I1126 09:43:47.946027 4940 generic.go:334] "Generic (PLEG): container finished" podID="4fd0ed71-a15f-4e19-a43f-2822fc14199a" containerID="7d923f4503e0859a27174a37d4e57bf97e6a067b1cdf1dbdf77c79741bfe56ed" exitCode=0 Nov 26 09:43:47 crc kubenswrapper[4940]: I1126 09:43:47.946071 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" event={"ID":"4fd0ed71-a15f-4e19-a43f-2822fc14199a","Type":"ContainerDied","Data":"7d923f4503e0859a27174a37d4e57bf97e6a067b1cdf1dbdf77c79741bfe56ed"} Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.479549 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.664847 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ceph\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.664953 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-0\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665001 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-combined-ca-bundle\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665052 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-0\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665133 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-1\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665187 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-1\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665254 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xdjh\" (UniqueName: \"kubernetes.io/projected/4fd0ed71-a15f-4e19-a43f-2822fc14199a-kube-api-access-9xdjh\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665292 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-0\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665409 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ssh-key\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665475 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-1\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.665491 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-inventory\") pod \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\" (UID: \"4fd0ed71-a15f-4e19-a43f-2822fc14199a\") " Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.672796 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fd0ed71-a15f-4e19-a43f-2822fc14199a-kube-api-access-9xdjh" (OuterVolumeSpecName: "kube-api-access-9xdjh") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "kube-api-access-9xdjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.673571 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.686822 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ceph" (OuterVolumeSpecName: "ceph") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.696473 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.706600 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.710019 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.713474 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.716223 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.719826 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.740298 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.740340 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-inventory" (OuterVolumeSpecName: "inventory") pod "4fd0ed71-a15f-4e19-a43f-2822fc14199a" (UID: "4fd0ed71-a15f-4e19-a43f-2822fc14199a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768349 4940 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768390 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xdjh\" (UniqueName: \"kubernetes.io/projected/4fd0ed71-a15f-4e19-a43f-2822fc14199a-kube-api-access-9xdjh\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768403 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768414 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768424 4940 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768435 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768443 4940 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-ceph\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768451 4940 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768459 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768468 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.768476 4940 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/4fd0ed71-a15f-4e19-a43f-2822fc14199a-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.968603 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" event={"ID":"4fd0ed71-a15f-4e19-a43f-2822fc14199a","Type":"ContainerDied","Data":"d29f4248c74e3213e247ad7f71ddd813938ea01512ac6bc91f7983a428c834bc"} Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.968644 4940 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d29f4248c74e3213e247ad7f71ddd813938ea01512ac6bc91f7983a428c834bc" Nov 26 09:43:49 crc kubenswrapper[4940]: I1126 09:43:49.968717 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l" Nov 26 09:44:51 crc kubenswrapper[4940]: I1126 09:44:51.728776 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:44:51 crc kubenswrapper[4940]: I1126 09:44:51.729351 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.158740 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68"] Nov 26 09:45:00 crc kubenswrapper[4940]: E1126 09:45:00.160085 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="registry-server" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.160105 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="registry-server" Nov 26 09:45:00 crc kubenswrapper[4940]: E1126 09:45:00.160177 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="extract-utilities" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.160187 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="extract-utilities" Nov 26 09:45:00 crc kubenswrapper[4940]: E1126 09:45:00.160200 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="extract-content" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.160208 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="extract-content" Nov 26 09:45:00 crc kubenswrapper[4940]: E1126 09:45:00.160266 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fd0ed71-a15f-4e19-a43f-2822fc14199a" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.160276 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd0ed71-a15f-4e19-a43f-2822fc14199a" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.160534 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fd0ed71-a15f-4e19-a43f-2822fc14199a" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.160566 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="158b5416-6a23-49ad-8156-af9ec2cf638a" containerName="registry-server" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.161970 4940 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.167435 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.167816 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.171650 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68"] Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.323840 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84rlc\" (UniqueName: \"kubernetes.io/projected/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-kube-api-access-84rlc\") pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.324087 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-secret-volume\") pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.324121 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-config-volume\") pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.425428 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84rlc\" (UniqueName: \"kubernetes.io/projected/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-kube-api-access-84rlc\") pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.425602 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-secret-volume\") pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.425640 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-config-volume\") pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.427020 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-config-volume\") 
pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.431311 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-secret-volume\") pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.450501 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84rlc\" (UniqueName: \"kubernetes.io/projected/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-kube-api-access-84rlc\") pod \"collect-profiles-29402505-hrl68\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.498515 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:00 crc kubenswrapper[4940]: I1126 09:45:00.970361 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68"] Nov 26 09:45:01 crc kubenswrapper[4940]: I1126 09:45:01.917752 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" event={"ID":"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab","Type":"ContainerStarted","Data":"58d42be50ec8d2e4e16fa61013e7ca319f77a50b5d485f5e10a1d346f5f9a384"} Nov 26 09:45:01 crc kubenswrapper[4940]: I1126 09:45:01.918298 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" event={"ID":"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab","Type":"ContainerStarted","Data":"8c88de5cb682567af1b9e0a64849d78e6fcd97a699f4eff16d5d5d5eb981fe96"} Nov 26 09:45:01 crc kubenswrapper[4940]: I1126 09:45:01.938406 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" podStartSLOduration=1.938384342 podStartE2EDuration="1.938384342s" podCreationTimestamp="2025-11-26 09:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:45:01.931003737 +0000 UTC m=+10203.451145366" watchObservedRunningTime="2025-11-26 09:45:01.938384342 +0000 UTC m=+10203.458525971" Nov 26 09:45:02 crc kubenswrapper[4940]: I1126 09:45:02.937827 4940 generic.go:334] "Generic (PLEG): container finished" podID="f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab" containerID="58d42be50ec8d2e4e16fa61013e7ca319f77a50b5d485f5e10a1d346f5f9a384" exitCode=0 Nov 26 09:45:02 crc kubenswrapper[4940]: I1126 09:45:02.937905 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" event={"ID":"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab","Type":"ContainerDied","Data":"58d42be50ec8d2e4e16fa61013e7ca319f77a50b5d485f5e10a1d346f5f9a384"} Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.381386 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.551968 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-config-volume\") pod \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.552449 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84rlc\" (UniqueName: \"kubernetes.io/projected/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-kube-api-access-84rlc\") pod \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.552484 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-secret-volume\") pod \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\" (UID: \"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab\") " Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.553054 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-config-volume" (OuterVolumeSpecName: "config-volume") pod "f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab" (UID: "f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.558369 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-kube-api-access-84rlc" (OuterVolumeSpecName: "kube-api-access-84rlc") pod "f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab" (UID: "f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab"). InnerVolumeSpecName "kube-api-access-84rlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.559052 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab" (UID: "f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.654976 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.655026 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.655064 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84rlc\" (UniqueName: \"kubernetes.io/projected/f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab-kube-api-access-84rlc\") on node \"crc\" DevicePath \"\"" Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.979569 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" event={"ID":"f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab","Type":"ContainerDied","Data":"8c88de5cb682567af1b9e0a64849d78e6fcd97a699f4eff16d5d5d5eb981fe96"} Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.979642 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c88de5cb682567af1b9e0a64849d78e6fcd97a699f4eff16d5d5d5eb981fe96" Nov 26 09:45:04 crc kubenswrapper[4940]: I1126 09:45:04.979915 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402505-hrl68" Nov 26 09:45:05 crc kubenswrapper[4940]: I1126 09:45:05.028438 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9"] Nov 26 09:45:05 crc kubenswrapper[4940]: I1126 09:45:05.041198 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402460-p7fz9"] Nov 26 09:45:05 crc kubenswrapper[4940]: I1126 09:45:05.180612 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31542d31-1cc8-4a7b-a85d-af79874ed4b9" path="/var/lib/kubelet/pods/31542d31-1cc8-4a7b-a85d-af79874ed4b9/volumes" Nov 26 09:45:15 crc kubenswrapper[4940]: I1126 09:45:15.817229 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 09:45:15 crc kubenswrapper[4940]: I1126 09:45:15.817997 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-copy-data" podUID="109dbcfc-960f-4aab-a9ad-fa756001dca4" containerName="adoption" containerID="cri-o://99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055" gracePeriod=30 Nov 26 09:45:17 crc kubenswrapper[4940]: I1126 09:45:17.744130 4940 scope.go:117] "RemoveContainer" containerID="4c44ce744c763bbac7282a1e346efc730ce2e29178a9f5cb0c19f94032efaf82" Nov 26 09:45:21 crc kubenswrapper[4940]: I1126 09:45:21.729278 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:45:21 crc kubenswrapper[4940]: I1126 09:45:21.729949 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.072337 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qvlrk"] Nov 26 09:45:41 crc kubenswrapper[4940]: E1126 09:45:41.073434 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab" containerName="collect-profiles" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.073450 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab" containerName="collect-profiles" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.073696 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f34c2866-3e06-4ac5-8aaf-32b2fca1b5ab" containerName="collect-profiles" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.075616 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.097106 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qvlrk"] Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.179071 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-utilities\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.179336 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfhhk\" (UniqueName: \"kubernetes.io/projected/00c5760b-3ced-4363-9ff5-49cb8240edde-kube-api-access-xfhhk\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.179441 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-catalog-content\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.280981 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfhhk\" (UniqueName: \"kubernetes.io/projected/00c5760b-3ced-4363-9ff5-49cb8240edde-kube-api-access-xfhhk\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.281116 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-catalog-content\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.281237 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-utilities\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.281937 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-utilities\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.281956 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-catalog-content\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.306076 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfhhk\" (UniqueName: \"kubernetes.io/projected/00c5760b-3ced-4363-9ff5-49cb8240edde-kube-api-access-xfhhk\") pod \"certified-operators-qvlrk\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.406205 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:41 crc kubenswrapper[4940]: I1126 09:45:41.908070 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qvlrk"] Nov 26 09:45:42 crc kubenswrapper[4940]: I1126 09:45:42.513448 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvlrk" event={"ID":"00c5760b-3ced-4363-9ff5-49cb8240edde","Type":"ContainerStarted","Data":"fad281c725b7ac501b822d54fae24a708b6b17245ac992247f85f7a834356558"} Nov 26 09:45:43 crc kubenswrapper[4940]: I1126 09:45:43.528891 4940 generic.go:334] "Generic (PLEG): container finished" podID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerID="3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b" exitCode=0 Nov 26 09:45:43 crc kubenswrapper[4940]: I1126 09:45:43.528982 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvlrk" event={"ID":"00c5760b-3ced-4363-9ff5-49cb8240edde","Type":"ContainerDied","Data":"3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b"} Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.327601 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.406793 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7fdw\" (UniqueName: \"kubernetes.io/projected/109dbcfc-960f-4aab-a9ad-fa756001dca4-kube-api-access-q7fdw\") pod \"109dbcfc-960f-4aab-a9ad-fa756001dca4\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") " Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.407814 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mariadb-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\") pod \"109dbcfc-960f-4aab-a9ad-fa756001dca4\" (UID: \"109dbcfc-960f-4aab-a9ad-fa756001dca4\") " Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.430833 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc" (OuterVolumeSpecName: "mariadb-data") pod "109dbcfc-960f-4aab-a9ad-fa756001dca4" (UID: "109dbcfc-960f-4aab-a9ad-fa756001dca4"). InnerVolumeSpecName "pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.431033 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/109dbcfc-960f-4aab-a9ad-fa756001dca4-kube-api-access-q7fdw" (OuterVolumeSpecName: "kube-api-access-q7fdw") pod "109dbcfc-960f-4aab-a9ad-fa756001dca4" (UID: "109dbcfc-960f-4aab-a9ad-fa756001dca4"). InnerVolumeSpecName "kube-api-access-q7fdw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.509917 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\") on node \"crc\" " Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.510231 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7fdw\" (UniqueName: \"kubernetes.io/projected/109dbcfc-960f-4aab-a9ad-fa756001dca4-kube-api-access-q7fdw\") on node \"crc\" DevicePath \"\"" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.538692 4940 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.538947 4940 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc") on node "crc" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.575665 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvlrk" event={"ID":"00c5760b-3ced-4363-9ff5-49cb8240edde","Type":"ContainerStarted","Data":"f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10"} Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.577380 4940 generic.go:334] "Generic (PLEG): container finished" podID="109dbcfc-960f-4aab-a9ad-fa756001dca4" containerID="99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055" exitCode=137 Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.577434 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"109dbcfc-960f-4aab-a9ad-fa756001dca4","Type":"ContainerDied","Data":"99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055"} Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.577460 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.577498 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"109dbcfc-960f-4aab-a9ad-fa756001dca4","Type":"ContainerDied","Data":"e39ccfbf2f849d443a0b68f6d02b3fc163ba5728d521bcb9e02dc7160eb77bb4"} Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.577521 4940 scope.go:117] "RemoveContainer" containerID="99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.613991 4940 reconciler_common.go:293] "Volume detached for volume \"pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0362ae28-f0ad-4684-b4af-a3536ceb6afc\") on node \"crc\" DevicePath \"\"" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.637248 4940 scope.go:117] "RemoveContainer" containerID="99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055" Nov 26 09:45:46 crc kubenswrapper[4940]: E1126 09:45:46.637978 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055\": container with ID starting with 99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055 not found: ID does not exist" containerID="99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.638066 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055"} err="failed to get container status \"99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055\": rpc error: code = NotFound desc = could not find container \"99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055\": container with ID starting with 99418f3a5cc9e5b4fa18bce717f28c788e3ae02872e3c239a7a3135da2028055 not found: ID does not exist" Nov 26 09:45:46 crc kubenswrapper[4940]: I1126 09:45:46.650257 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 09:45:46 
crc kubenswrapper[4940]: I1126 09:45:46.662884 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 09:45:47 crc kubenswrapper[4940]: I1126 09:45:47.178953 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="109dbcfc-960f-4aab-a9ad-fa756001dca4" path="/var/lib/kubelet/pods/109dbcfc-960f-4aab-a9ad-fa756001dca4/volumes" Nov 26 09:45:47 crc kubenswrapper[4940]: I1126 09:45:47.278024 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 09:45:47 crc kubenswrapper[4940]: I1126 09:45:47.278391 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-copy-data" podUID="53b10b93-4376-4a88-9ab0-98ea88881ded" containerName="adoption" containerID="cri-o://48ed3f1fe397c68af4339668a277462bc16a299dbb64ff03b086bd88f1f92712" gracePeriod=30 Nov 26 09:45:47 crc kubenswrapper[4940]: I1126 09:45:47.593408 4940 generic.go:334] "Generic (PLEG): container finished" podID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerID="f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10" exitCode=0 Nov 26 09:45:47 crc kubenswrapper[4940]: I1126 09:45:47.593456 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvlrk" event={"ID":"00c5760b-3ced-4363-9ff5-49cb8240edde","Type":"ContainerDied","Data":"f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10"} Nov 26 09:45:49 crc kubenswrapper[4940]: I1126 09:45:49.615892 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvlrk" event={"ID":"00c5760b-3ced-4363-9ff5-49cb8240edde","Type":"ContainerStarted","Data":"0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612"} Nov 26 09:45:49 crc kubenswrapper[4940]: I1126 09:45:49.646924 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qvlrk" podStartSLOduration=3.820416564 podStartE2EDuration="8.646899508s" podCreationTimestamp="2025-11-26 09:45:41 +0000 UTC" firstStartedPulling="2025-11-26 09:45:43.53175861 +0000 UTC m=+10245.051900239" lastFinishedPulling="2025-11-26 09:45:48.358241534 +0000 UTC m=+10249.878383183" observedRunningTime="2025-11-26 09:45:49.63373631 +0000 UTC m=+10251.153877939" watchObservedRunningTime="2025-11-26 09:45:49.646899508 +0000 UTC m=+10251.167041137" Nov 26 09:45:51 crc kubenswrapper[4940]: I1126 09:45:51.406674 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:51 crc kubenswrapper[4940]: I1126 09:45:51.407367 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:51 crc kubenswrapper[4940]: I1126 09:45:51.484462 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:45:51 crc kubenswrapper[4940]: I1126 09:45:51.728736 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:45:51 crc kubenswrapper[4940]: I1126 09:45:51.729127 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:45:51 crc kubenswrapper[4940]: I1126 09:45:51.729169 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 09:45:51 crc kubenswrapper[4940]: I1126 09:45:51.729967 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 09:45:51 crc kubenswrapper[4940]: I1126 09:45:51.730032 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" gracePeriod=600 Nov 26 09:45:52 crc kubenswrapper[4940]: E1126 09:45:52.358817 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:45:52 crc kubenswrapper[4940]: I1126 09:45:52.663446 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" exitCode=0 Nov 26 09:45:52 crc kubenswrapper[4940]: I1126 09:45:52.663540 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba"} Nov 26 09:45:52 crc kubenswrapper[4940]: I1126 09:45:52.664006 4940 scope.go:117] "RemoveContainer" containerID="47d5d2db4ba968e85e34ad466f92d109f867faaa638ca2b2990de6899520ee90" Nov 26 09:45:52 crc kubenswrapper[4940]: I1126 09:45:52.665111 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:45:52 crc kubenswrapper[4940]: E1126 09:45:52.665742 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:46:01 crc kubenswrapper[4940]: I1126 09:46:01.483517 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:46:01 crc kubenswrapper[4940]: I1126 09:46:01.580164 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qvlrk"] Nov 26 09:46:01 
crc kubenswrapper[4940]: I1126 09:46:01.782840 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qvlrk" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerName="registry-server" containerID="cri-o://0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612" gracePeriod=2 Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.520170 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.653633 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-utilities\") pod \"00c5760b-3ced-4363-9ff5-49cb8240edde\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.653872 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfhhk\" (UniqueName: \"kubernetes.io/projected/00c5760b-3ced-4363-9ff5-49cb8240edde-kube-api-access-xfhhk\") pod \"00c5760b-3ced-4363-9ff5-49cb8240edde\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.653977 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-catalog-content\") pod \"00c5760b-3ced-4363-9ff5-49cb8240edde\" (UID: \"00c5760b-3ced-4363-9ff5-49cb8240edde\") " Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.654920 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-utilities" (OuterVolumeSpecName: "utilities") pod "00c5760b-3ced-4363-9ff5-49cb8240edde" (UID: "00c5760b-3ced-4363-9ff5-49cb8240edde"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.661448 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00c5760b-3ced-4363-9ff5-49cb8240edde-kube-api-access-xfhhk" (OuterVolumeSpecName: "kube-api-access-xfhhk") pod "00c5760b-3ced-4363-9ff5-49cb8240edde" (UID: "00c5760b-3ced-4363-9ff5-49cb8240edde"). InnerVolumeSpecName "kube-api-access-xfhhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.711821 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00c5760b-3ced-4363-9ff5-49cb8240edde" (UID: "00c5760b-3ced-4363-9ff5-49cb8240edde"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.756639 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.756689 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfhhk\" (UniqueName: \"kubernetes.io/projected/00c5760b-3ced-4363-9ff5-49cb8240edde-kube-api-access-xfhhk\") on node \"crc\" DevicePath \"\"" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.756706 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00c5760b-3ced-4363-9ff5-49cb8240edde-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.799930 4940 generic.go:334] "Generic (PLEG): container finished" podID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerID="0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612" exitCode=0 Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.799999 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvlrk" event={"ID":"00c5760b-3ced-4363-9ff5-49cb8240edde","Type":"ContainerDied","Data":"0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612"} Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.800086 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qvlrk" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.800123 4940 scope.go:117] "RemoveContainer" containerID="0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.800109 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvlrk" event={"ID":"00c5760b-3ced-4363-9ff5-49cb8240edde","Type":"ContainerDied","Data":"fad281c725b7ac501b822d54fae24a708b6b17245ac992247f85f7a834356558"} Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.840304 4940 scope.go:117] "RemoveContainer" containerID="f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.851030 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qvlrk"] Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.864678 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qvlrk"] Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.894135 4940 scope.go:117] "RemoveContainer" containerID="3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.952078 4940 scope.go:117] "RemoveContainer" containerID="0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612" Nov 26 09:46:02 crc kubenswrapper[4940]: E1126 09:46:02.952528 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612\": container with ID starting with 0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612 not found: ID does not exist" containerID="0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.952639 
4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612"} err="failed to get container status \"0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612\": rpc error: code = NotFound desc = could not find container \"0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612\": container with ID starting with 0511001b143c73dd957a33f782759714553c24f4681cb90769707e47d181c612 not found: ID does not exist" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.952733 4940 scope.go:117] "RemoveContainer" containerID="f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10" Nov 26 09:46:02 crc kubenswrapper[4940]: E1126 09:46:02.953418 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10\": container with ID starting with f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10 not found: ID does not exist" containerID="f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.953695 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10"} err="failed to get container status \"f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10\": rpc error: code = NotFound desc = could not find container \"f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10\": container with ID starting with f9f11fdf73c5aaef41710264e2c741635137d6ae8409939fc6d1a35cc513bc10 not found: ID does not exist" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.953788 4940 scope.go:117] "RemoveContainer" containerID="3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b" Nov 26 09:46:02 crc kubenswrapper[4940]: E1126 09:46:02.954201 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b\": container with ID starting with 3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b not found: ID does not exist" containerID="3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b" Nov 26 09:46:02 crc kubenswrapper[4940]: I1126 09:46:02.954294 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b"} err="failed to get container status \"3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b\": rpc error: code = NotFound desc = could not find container \"3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b\": container with ID starting with 3c73713837f665655028b614019d5dc4fb1102549c4fd5b9033db1e8e333d20b not found: ID does not exist" Nov 26 09:46:03 crc kubenswrapper[4940]: I1126 09:46:03.165576 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:46:03 crc kubenswrapper[4940]: E1126 09:46:03.165847 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:46:03 crc kubenswrapper[4940]: I1126 09:46:03.187678 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" path="/var/lib/kubelet/pods/00c5760b-3ced-4363-9ff5-49cb8240edde/volumes" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.166323 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:46:17 crc kubenswrapper[4940]: E1126 09:46:17.167095 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.727596 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.786243 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522\") pod \"53b10b93-4376-4a88-9ab0-98ea88881ded\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.786456 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5j2sz\" (UniqueName: \"kubernetes.io/projected/53b10b93-4376-4a88-9ab0-98ea88881ded-kube-api-access-5j2sz\") pod \"53b10b93-4376-4a88-9ab0-98ea88881ded\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.786513 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/53b10b93-4376-4a88-9ab0-98ea88881ded-ovn-data-cert\") pod \"53b10b93-4376-4a88-9ab0-98ea88881ded\" (UID: \"53b10b93-4376-4a88-9ab0-98ea88881ded\") " Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.795274 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53b10b93-4376-4a88-9ab0-98ea88881ded-kube-api-access-5j2sz" (OuterVolumeSpecName: "kube-api-access-5j2sz") pod "53b10b93-4376-4a88-9ab0-98ea88881ded" (UID: "53b10b93-4376-4a88-9ab0-98ea88881ded"). InnerVolumeSpecName "kube-api-access-5j2sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.797094 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53b10b93-4376-4a88-9ab0-98ea88881ded-ovn-data-cert" (OuterVolumeSpecName: "ovn-data-cert") pod "53b10b93-4376-4a88-9ab0-98ea88881ded" (UID: "53b10b93-4376-4a88-9ab0-98ea88881ded"). InnerVolumeSpecName "ovn-data-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.805318 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522" (OuterVolumeSpecName: "ovn-data") pod "53b10b93-4376-4a88-9ab0-98ea88881ded" (UID: "53b10b93-4376-4a88-9ab0-98ea88881ded"). InnerVolumeSpecName "pvc-7d78e14c-ea9c-436a-894a-313587835522". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.815166 4940 scope.go:117] "RemoveContainer" containerID="48ed3f1fe397c68af4339668a277462bc16a299dbb64ff03b086bd88f1f92712" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.889062 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7d78e14c-ea9c-436a-894a-313587835522\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522\") on node \"crc\" " Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.889299 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5j2sz\" (UniqueName: \"kubernetes.io/projected/53b10b93-4376-4a88-9ab0-98ea88881ded-kube-api-access-5j2sz\") on node \"crc\" DevicePath \"\"" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.889312 4940 reconciler_common.go:293] "Volume detached for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/53b10b93-4376-4a88-9ab0-98ea88881ded-ovn-data-cert\") on node \"crc\" DevicePath \"\"" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.931972 4940 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.932145 4940 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7d78e14c-ea9c-436a-894a-313587835522" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522") on node "crc" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.980094 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.980130 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"53b10b93-4376-4a88-9ab0-98ea88881ded","Type":"ContainerDied","Data":"48ed3f1fe397c68af4339668a277462bc16a299dbb64ff03b086bd88f1f92712"} Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.980242 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"53b10b93-4376-4a88-9ab0-98ea88881ded","Type":"ContainerDied","Data":"0627062bf4d1baf7fdeb446dac66ee6c72aa50e721d1836f6b65452a50241eed"} Nov 26 09:46:17 crc kubenswrapper[4940]: I1126 09:46:17.992405 4940 reconciler_common.go:293] "Volume detached for volume \"pvc-7d78e14c-ea9c-436a-894a-313587835522\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d78e14c-ea9c-436a-894a-313587835522\") on node \"crc\" DevicePath \"\"" Nov 26 09:46:18 crc kubenswrapper[4940]: I1126 09:46:18.033407 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 09:46:18 crc kubenswrapper[4940]: I1126 09:46:18.047878 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 09:46:19 crc kubenswrapper[4940]: I1126 09:46:19.179621 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53b10b93-4376-4a88-9ab0-98ea88881ded" path="/var/lib/kubelet/pods/53b10b93-4376-4a88-9ab0-98ea88881ded/volumes" Nov 26 09:46:29 crc kubenswrapper[4940]: I1126 09:46:29.178706 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:46:29 crc kubenswrapper[4940]: E1126 09:46:29.179478 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:46:41 crc kubenswrapper[4940]: I1126 09:46:41.165374 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:46:41 crc kubenswrapper[4940]: E1126 09:46:41.166076 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:46:52 crc kubenswrapper[4940]: I1126 09:46:52.166259 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:46:52 crc kubenswrapper[4940]: E1126 09:46:52.167588 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:47:07 crc 
kubenswrapper[4940]: I1126 09:47:07.166862 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:47:07 crc kubenswrapper[4940]: E1126 09:47:07.168627 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:47:20 crc kubenswrapper[4940]: I1126 09:47:20.166378 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:47:20 crc kubenswrapper[4940]: E1126 09:47:20.167662 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.367764 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5qn2f"] Nov 26 09:47:27 crc kubenswrapper[4940]: E1126 09:47:27.368888 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="109dbcfc-960f-4aab-a9ad-fa756001dca4" containerName="adoption" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.368908 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="109dbcfc-960f-4aab-a9ad-fa756001dca4" containerName="adoption" Nov 26 09:47:27 crc kubenswrapper[4940]: E1126 09:47:27.368936 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerName="extract-content" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.368947 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerName="extract-content" Nov 26 09:47:27 crc kubenswrapper[4940]: E1126 09:47:27.368966 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerName="extract-utilities" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.368977 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerName="extract-utilities" Nov 26 09:47:27 crc kubenswrapper[4940]: E1126 09:47:27.368993 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerName="registry-server" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.369000 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerName="registry-server" Nov 26 09:47:27 crc kubenswrapper[4940]: E1126 09:47:27.369060 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53b10b93-4376-4a88-9ab0-98ea88881ded" containerName="adoption" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.369069 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="53b10b93-4376-4a88-9ab0-98ea88881ded" containerName="adoption" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.369311 4940 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="109dbcfc-960f-4aab-a9ad-fa756001dca4" containerName="adoption" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.369354 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="00c5760b-3ced-4363-9ff5-49cb8240edde" containerName="registry-server" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.369376 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="53b10b93-4376-4a88-9ab0-98ea88881ded" containerName="adoption" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.371291 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.383534 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5qn2f"] Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.420630 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-utilities\") pod \"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.420702 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-catalog-content\") pod \"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.420765 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjbc6\" (UniqueName: \"kubernetes.io/projected/1e1c150a-467c-419e-900a-a9fb8eba8cc4-kube-api-access-rjbc6\") pod \"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.522387 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-utilities\") pod \"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.522468 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-catalog-content\") pod \"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.522528 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjbc6\" (UniqueName: \"kubernetes.io/projected/1e1c150a-467c-419e-900a-a9fb8eba8cc4-kube-api-access-rjbc6\") pod \"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.522878 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-utilities\") pod 
\"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.522898 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-catalog-content\") pod \"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.542334 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjbc6\" (UniqueName: \"kubernetes.io/projected/1e1c150a-467c-419e-900a-a9fb8eba8cc4-kube-api-access-rjbc6\") pod \"redhat-marketplace-5qn2f\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:27 crc kubenswrapper[4940]: I1126 09:47:27.691572 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:28 crc kubenswrapper[4940]: I1126 09:47:28.151914 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5qn2f"] Nov 26 09:47:28 crc kubenswrapper[4940]: I1126 09:47:28.901055 4940 generic.go:334] "Generic (PLEG): container finished" podID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerID="283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f" exitCode=0 Nov 26 09:47:28 crc kubenswrapper[4940]: I1126 09:47:28.901166 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5qn2f" event={"ID":"1e1c150a-467c-419e-900a-a9fb8eba8cc4","Type":"ContainerDied","Data":"283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f"} Nov 26 09:47:28 crc kubenswrapper[4940]: I1126 09:47:28.901407 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5qn2f" event={"ID":"1e1c150a-467c-419e-900a-a9fb8eba8cc4","Type":"ContainerStarted","Data":"601c4d07fb8d3b1658fd43e2b1a8642146c5b52264a328ea79154e9d483aaeca"} Nov 26 09:47:28 crc kubenswrapper[4940]: I1126 09:47:28.908151 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:47:30 crc kubenswrapper[4940]: I1126 09:47:30.941987 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5qn2f" event={"ID":"1e1c150a-467c-419e-900a-a9fb8eba8cc4","Type":"ContainerStarted","Data":"d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391"} Nov 26 09:47:31 crc kubenswrapper[4940]: I1126 09:47:31.956564 4940 generic.go:334] "Generic (PLEG): container finished" podID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerID="d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391" exitCode=0 Nov 26 09:47:31 crc kubenswrapper[4940]: I1126 09:47:31.956637 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5qn2f" event={"ID":"1e1c150a-467c-419e-900a-a9fb8eba8cc4","Type":"ContainerDied","Data":"d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391"} Nov 26 09:47:32 crc kubenswrapper[4940]: I1126 09:47:32.168594 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:47:32 crc kubenswrapper[4940]: E1126 09:47:32.169223 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:47:32 crc kubenswrapper[4940]: I1126 09:47:32.974325 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5qn2f" event={"ID":"1e1c150a-467c-419e-900a-a9fb8eba8cc4","Type":"ContainerStarted","Data":"f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013"} Nov 26 09:47:33 crc kubenswrapper[4940]: I1126 09:47:33.007559 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5qn2f" podStartSLOduration=2.479813151 podStartE2EDuration="6.007535241s" podCreationTimestamp="2025-11-26 09:47:27 +0000 UTC" firstStartedPulling="2025-11-26 09:47:28.907816899 +0000 UTC m=+10350.427958518" lastFinishedPulling="2025-11-26 09:47:32.435538979 +0000 UTC m=+10353.955680608" observedRunningTime="2025-11-26 09:47:32.998029979 +0000 UTC m=+10354.518171958" watchObservedRunningTime="2025-11-26 09:47:33.007535241 +0000 UTC m=+10354.527676870" Nov 26 09:47:37 crc kubenswrapper[4940]: I1126 09:47:37.692135 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:37 crc kubenswrapper[4940]: I1126 09:47:37.692792 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:37 crc kubenswrapper[4940]: I1126 09:47:37.782196 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:38 crc kubenswrapper[4940]: I1126 09:47:38.093670 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:38 crc kubenswrapper[4940]: I1126 09:47:38.142153 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5qn2f"] Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.070804 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5qn2f" podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerName="registry-server" containerID="cri-o://f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013" gracePeriod=2 Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.590304 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.657736 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjbc6\" (UniqueName: \"kubernetes.io/projected/1e1c150a-467c-419e-900a-a9fb8eba8cc4-kube-api-access-rjbc6\") pod \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.664475 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e1c150a-467c-419e-900a-a9fb8eba8cc4-kube-api-access-rjbc6" (OuterVolumeSpecName: "kube-api-access-rjbc6") pod "1e1c150a-467c-419e-900a-a9fb8eba8cc4" (UID: "1e1c150a-467c-419e-900a-a9fb8eba8cc4"). 
InnerVolumeSpecName "kube-api-access-rjbc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.759783 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-utilities\") pod \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.759871 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-catalog-content\") pod \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\" (UID: \"1e1c150a-467c-419e-900a-a9fb8eba8cc4\") " Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.760436 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjbc6\" (UniqueName: \"kubernetes.io/projected/1e1c150a-467c-419e-900a-a9fb8eba8cc4-kube-api-access-rjbc6\") on node \"crc\" DevicePath \"\"" Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.761565 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-utilities" (OuterVolumeSpecName: "utilities") pod "1e1c150a-467c-419e-900a-a9fb8eba8cc4" (UID: "1e1c150a-467c-419e-900a-a9fb8eba8cc4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.785644 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1e1c150a-467c-419e-900a-a9fb8eba8cc4" (UID: "1e1c150a-467c-419e-900a-a9fb8eba8cc4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.862323 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:47:40 crc kubenswrapper[4940]: I1126 09:47:40.862360 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1c150a-467c-419e-900a-a9fb8eba8cc4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.083565 4940 generic.go:334] "Generic (PLEG): container finished" podID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerID="f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013" exitCode=0 Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.083612 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5qn2f" event={"ID":"1e1c150a-467c-419e-900a-a9fb8eba8cc4","Type":"ContainerDied","Data":"f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013"} Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.083652 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5qn2f" event={"ID":"1e1c150a-467c-419e-900a-a9fb8eba8cc4","Type":"ContainerDied","Data":"601c4d07fb8d3b1658fd43e2b1a8642146c5b52264a328ea79154e9d483aaeca"} Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.083674 4940 scope.go:117] "RemoveContainer" containerID="f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.083718 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5qn2f" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.116835 4940 scope.go:117] "RemoveContainer" containerID="d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.144909 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5qn2f"] Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.157106 4940 scope.go:117] "RemoveContainer" containerID="283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.161656 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5qn2f"] Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.179931 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" path="/var/lib/kubelet/pods/1e1c150a-467c-419e-900a-a9fb8eba8cc4/volumes" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.185832 4940 scope.go:117] "RemoveContainer" containerID="f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013" Nov 26 09:47:41 crc kubenswrapper[4940]: E1126 09:47:41.186325 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013\": container with ID starting with f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013 not found: ID does not exist" containerID="f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.186385 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013"} err="failed to get container status \"f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013\": rpc error: code = NotFound desc = could not find container \"f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013\": container with ID starting with f35cd2a597bac845214407a6da2311289ef2dc20687869b88eeb1a31ba18f013 not found: ID does not exist" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.186413 4940 scope.go:117] "RemoveContainer" containerID="d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391" Nov 26 09:47:41 crc kubenswrapper[4940]: E1126 09:47:41.187007 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391\": container with ID starting with d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391 not found: ID does not exist" containerID="d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.187120 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391"} err="failed to get container status \"d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391\": rpc error: code = NotFound desc = could not find container \"d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391\": container with ID starting with d1ed61d991118505fead3b086b77c38880628fbb49653f7df43018fec23f1391 not found: ID does not exist" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 
09:47:41.187213 4940 scope.go:117] "RemoveContainer" containerID="283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f" Nov 26 09:47:41 crc kubenswrapper[4940]: E1126 09:47:41.187679 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f\": container with ID starting with 283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f not found: ID does not exist" containerID="283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f" Nov 26 09:47:41 crc kubenswrapper[4940]: I1126 09:47:41.187712 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f"} err="failed to get container status \"283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f\": rpc error: code = NotFound desc = could not find container \"283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f\": container with ID starting with 283258d64b54e8ee5113a6dc4aa2182b349ee079d1628aed0f22886bb904c31f not found: ID does not exist" Nov 26 09:47:44 crc kubenswrapper[4940]: I1126 09:47:44.166095 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:47:44 crc kubenswrapper[4940]: E1126 09:47:44.166827 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:47:58 crc kubenswrapper[4940]: I1126 09:47:58.164973 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:47:58 crc kubenswrapper[4940]: E1126 09:47:58.165780 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:48:11 crc kubenswrapper[4940]: I1126 09:48:11.166109 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:48:11 crc kubenswrapper[4940]: E1126 09:48:11.166910 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:48:24 crc kubenswrapper[4940]: I1126 09:48:24.165401 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:48:24 crc kubenswrapper[4940]: E1126 09:48:24.166247 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:48:38 crc kubenswrapper[4940]: I1126 09:48:38.165976 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:48:38 crc kubenswrapper[4940]: E1126 09:48:38.166813 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:48:53 crc kubenswrapper[4940]: I1126 09:48:53.165555 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:48:53 crc kubenswrapper[4940]: E1126 09:48:53.166331 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:49:05 crc kubenswrapper[4940]: I1126 09:49:05.167170 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:49:05 crc kubenswrapper[4940]: E1126 09:49:05.168063 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:49:17 crc kubenswrapper[4940]: I1126 09:49:17.166167 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:49:17 crc kubenswrapper[4940]: E1126 09:49:17.166953 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:49:32 crc kubenswrapper[4940]: I1126 09:49:32.166291 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:49:32 crc kubenswrapper[4940]: E1126 09:49:32.167074 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:49:44 crc kubenswrapper[4940]: I1126 09:49:44.166377 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:49:44 crc kubenswrapper[4940]: E1126 09:49:44.167638 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:49:58 crc kubenswrapper[4940]: I1126 09:49:58.166253 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:49:58 crc kubenswrapper[4940]: E1126 09:49:58.167086 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:50:12 crc kubenswrapper[4940]: I1126 09:50:12.165764 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:50:12 crc kubenswrapper[4940]: E1126 09:50:12.166569 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:50:25 crc kubenswrapper[4940]: I1126 09:50:25.166174 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:50:25 crc kubenswrapper[4940]: E1126 09:50:25.167072 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:50:38 crc kubenswrapper[4940]: I1126 09:50:38.166277 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:50:38 crc kubenswrapper[4940]: E1126 09:50:38.167450 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" 
podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:50:52 crc kubenswrapper[4940]: I1126 09:50:52.165444 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:50:53 crc kubenswrapper[4940]: I1126 09:50:53.409359 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"f4cd5bdaccff8541f7cd7902ad4e5c002312bb69798855d3e140b16f99d10c44"} Nov 26 09:53:21 crc kubenswrapper[4940]: I1126 09:53:21.727994 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:53:21 crc kubenswrapper[4940]: I1126 09:53:21.728756 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:53:51 crc kubenswrapper[4940]: I1126 09:53:51.728633 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:53:51 crc kubenswrapper[4940]: I1126 09:53:51.729279 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:54:08 crc kubenswrapper[4940]: I1126 09:54:08.990370 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tkpcq"] Nov 26 09:54:08 crc kubenswrapper[4940]: E1126 09:54:08.992344 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerName="registry-server" Nov 26 09:54:08 crc kubenswrapper[4940]: I1126 09:54:08.992454 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerName="registry-server" Nov 26 09:54:08 crc kubenswrapper[4940]: E1126 09:54:08.992551 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerName="extract-content" Nov 26 09:54:08 crc kubenswrapper[4940]: I1126 09:54:08.992632 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerName="extract-content" Nov 26 09:54:08 crc kubenswrapper[4940]: E1126 09:54:08.992732 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerName="extract-utilities" Nov 26 09:54:08 crc kubenswrapper[4940]: I1126 09:54:08.992820 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerName="extract-utilities" Nov 26 09:54:08 crc kubenswrapper[4940]: I1126 09:54:08.993258 4940 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="1e1c150a-467c-419e-900a-a9fb8eba8cc4" containerName="registry-server" Nov 26 09:54:08 crc kubenswrapper[4940]: I1126 09:54:08.995267 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.004420 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-utilities\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.004639 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfsrp\" (UniqueName: \"kubernetes.io/projected/547fa41b-e3d2-4003-baf8-81a9f572a4a4-kube-api-access-qfsrp\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.004733 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-catalog-content\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.012443 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tkpcq"] Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.107177 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfsrp\" (UniqueName: \"kubernetes.io/projected/547fa41b-e3d2-4003-baf8-81a9f572a4a4-kube-api-access-qfsrp\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.107275 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-catalog-content\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.107362 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-utilities\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.107879 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-utilities\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.108411 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-catalog-content\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " 
pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.139771 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfsrp\" (UniqueName: \"kubernetes.io/projected/547fa41b-e3d2-4003-baf8-81a9f572a4a4-kube-api-access-qfsrp\") pod \"redhat-operators-tkpcq\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.318052 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.810495 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tkpcq"] Nov 26 09:54:09 crc kubenswrapper[4940]: I1126 09:54:09.987546 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkpcq" event={"ID":"547fa41b-e3d2-4003-baf8-81a9f572a4a4","Type":"ContainerStarted","Data":"f6f3fc5d8f0b2e10abee9111fc1e06ae02875fc2be1a06805e93c7c6b339322f"} Nov 26 09:54:11 crc kubenswrapper[4940]: I1126 09:54:11.013210 4940 generic.go:334] "Generic (PLEG): container finished" podID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerID="740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff" exitCode=0 Nov 26 09:54:11 crc kubenswrapper[4940]: I1126 09:54:11.013437 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkpcq" event={"ID":"547fa41b-e3d2-4003-baf8-81a9f572a4a4","Type":"ContainerDied","Data":"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff"} Nov 26 09:54:11 crc kubenswrapper[4940]: I1126 09:54:11.017265 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:54:12 crc kubenswrapper[4940]: I1126 09:54:12.028675 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkpcq" event={"ID":"547fa41b-e3d2-4003-baf8-81a9f572a4a4","Type":"ContainerStarted","Data":"38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608"} Nov 26 09:54:13 crc kubenswrapper[4940]: I1126 09:54:13.043703 4940 generic.go:334] "Generic (PLEG): container finished" podID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerID="38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608" exitCode=0 Nov 26 09:54:13 crc kubenswrapper[4940]: I1126 09:54:13.043765 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkpcq" event={"ID":"547fa41b-e3d2-4003-baf8-81a9f572a4a4","Type":"ContainerDied","Data":"38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608"} Nov 26 09:54:14 crc kubenswrapper[4940]: I1126 09:54:14.059259 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkpcq" event={"ID":"547fa41b-e3d2-4003-baf8-81a9f572a4a4","Type":"ContainerStarted","Data":"5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b"} Nov 26 09:54:14 crc kubenswrapper[4940]: I1126 09:54:14.100324 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tkpcq" podStartSLOduration=3.407084729 podStartE2EDuration="6.100294895s" podCreationTimestamp="2025-11-26 09:54:08 +0000 UTC" firstStartedPulling="2025-11-26 09:54:11.016954164 +0000 UTC m=+10752.537095793" lastFinishedPulling="2025-11-26 09:54:13.7101643 +0000 UTC m=+10755.230305959" 
Nov 26 09:54:19 crc kubenswrapper[4940]: I1126 09:54:19.319128 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tkpcq"
Nov 26 09:54:19 crc kubenswrapper[4940]: I1126 09:54:19.321546 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tkpcq"
Nov 26 09:54:20 crc kubenswrapper[4940]: I1126 09:54:20.402122 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tkpcq" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="registry-server" probeResult="failure" output=<
Nov 26 09:54:20 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s
Nov 26 09:54:20 crc kubenswrapper[4940]: >
Nov 26 09:54:21 crc kubenswrapper[4940]: I1126 09:54:21.728804 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:54:21 crc kubenswrapper[4940]: I1126 09:54:21.729311 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:54:21 crc kubenswrapper[4940]: I1126 09:54:21.729383 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm"
Nov 26 09:54:21 crc kubenswrapper[4940]: I1126 09:54:21.730574 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f4cd5bdaccff8541f7cd7902ad4e5c002312bb69798855d3e140b16f99d10c44"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 09:54:21 crc kubenswrapper[4940]: I1126 09:54:21.730679 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://f4cd5bdaccff8541f7cd7902ad4e5c002312bb69798855d3e140b16f99d10c44" gracePeriod=600
Nov 26 09:54:22 crc kubenswrapper[4940]: I1126 09:54:22.180137 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="f4cd5bdaccff8541f7cd7902ad4e5c002312bb69798855d3e140b16f99d10c44" exitCode=0
Nov 26 09:54:22 crc kubenswrapper[4940]: I1126 09:54:22.180240 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"f4cd5bdaccff8541f7cd7902ad4e5c002312bb69798855d3e140b16f99d10c44"}
Nov 26 09:54:22 crc kubenswrapper[4940]: I1126 09:54:22.180545 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc"}
Nov 26 09:54:22 crc kubenswrapper[4940]: I1126 09:54:22.180578 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba"
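The liveness failure above is a plain HTTP GET against http://127.0.0.1:8798/health that is refused because nothing is listening. A minimal client-side sketch of the same kind of check follows (endpoint taken from the log; the timeout and failure handling are illustrative, not kubelet's internals).

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTP liveness-style check. Any transport
// error (e.g. "connection refused") or non-2xx status counts as a
// probe failure.
func probeOnce(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("unhealthy: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeOnce("http://127.0.0.1:8798/health", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}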
event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc"} Nov 26 09:54:22 crc kubenswrapper[4940]: I1126 09:54:22.180578 4940 scope.go:117] "RemoveContainer" containerID="137214d5cd0c65a6cf74ce79cbe7b0f12be22cf0143580609af66a8d00c8f3ba" Nov 26 09:54:29 crc kubenswrapper[4940]: I1126 09:54:29.405598 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:29 crc kubenswrapper[4940]: I1126 09:54:29.490640 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:29 crc kubenswrapper[4940]: I1126 09:54:29.668533 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tkpcq"] Nov 26 09:54:31 crc kubenswrapper[4940]: I1126 09:54:31.300425 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tkpcq" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="registry-server" containerID="cri-o://5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b" gracePeriod=2 Nov 26 09:54:31 crc kubenswrapper[4940]: I1126 09:54:31.885811 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tkpcq" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.078653 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfsrp\" (UniqueName: \"kubernetes.io/projected/547fa41b-e3d2-4003-baf8-81a9f572a4a4-kube-api-access-qfsrp\") pod \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.078748 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-utilities\") pod \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.078800 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-catalog-content\") pod \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\" (UID: \"547fa41b-e3d2-4003-baf8-81a9f572a4a4\") " Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.080661 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-utilities" (OuterVolumeSpecName: "utilities") pod "547fa41b-e3d2-4003-baf8-81a9f572a4a4" (UID: "547fa41b-e3d2-4003-baf8-81a9f572a4a4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.088848 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/547fa41b-e3d2-4003-baf8-81a9f572a4a4-kube-api-access-qfsrp" (OuterVolumeSpecName: "kube-api-access-qfsrp") pod "547fa41b-e3d2-4003-baf8-81a9f572a4a4" (UID: "547fa41b-e3d2-4003-baf8-81a9f572a4a4"). InnerVolumeSpecName "kube-api-access-qfsrp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.172952 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "547fa41b-e3d2-4003-baf8-81a9f572a4a4" (UID: "547fa41b-e3d2-4003-baf8-81a9f572a4a4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.181595 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfsrp\" (UniqueName: \"kubernetes.io/projected/547fa41b-e3d2-4003-baf8-81a9f572a4a4-kube-api-access-qfsrp\") on node \"crc\" DevicePath \"\"" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.181638 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.181652 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/547fa41b-e3d2-4003-baf8-81a9f572a4a4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.321363 4940 generic.go:334] "Generic (PLEG): container finished" podID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerID="5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b" exitCode=0 Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.321421 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkpcq" event={"ID":"547fa41b-e3d2-4003-baf8-81a9f572a4a4","Type":"ContainerDied","Data":"5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b"} Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.321463 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkpcq" event={"ID":"547fa41b-e3d2-4003-baf8-81a9f572a4a4","Type":"ContainerDied","Data":"f6f3fc5d8f0b2e10abee9111fc1e06ae02875fc2be1a06805e93c7c6b339322f"} Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.321485 4940 scope.go:117] "RemoveContainer" containerID="5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.321517 4940 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.386890 4940 scope.go:117] "RemoveContainer" containerID="38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608"
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.413002 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tkpcq"]
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.437345 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tkpcq"]
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.448480 4940 scope.go:117] "RemoveContainer" containerID="740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff"
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.499611 4940 scope.go:117] "RemoveContainer" containerID="5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b"
Nov 26 09:54:32 crc kubenswrapper[4940]: E1126 09:54:32.500201 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b\": container with ID starting with 5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b not found: ID does not exist" containerID="5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b"
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.500435 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b"} err="failed to get container status \"5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b\": rpc error: code = NotFound desc = could not find container \"5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b\": container with ID starting with 5d98693ed588b210716993aa2d626af5d4b2f7fe52ea990d2d3a409d0fb92c6b not found: ID does not exist"
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.500599 4940 scope.go:117] "RemoveContainer" containerID="38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608"
Nov 26 09:54:32 crc kubenswrapper[4940]: E1126 09:54:32.501564 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608\": container with ID starting with 38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608 not found: ID does not exist" containerID="38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608"
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.501741 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608"} err="failed to get container status \"38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608\": rpc error: code = NotFound desc = could not find container \"38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608\": container with ID starting with 38d9d7ceaadf766e8d945be50673e9f855987ee279d569ad776f02006d363608 not found: ID does not exist"
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.501860 4940 scope.go:117] "RemoveContainer" containerID="740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff"
Nov 26 09:54:32 crc kubenswrapper[4940]: E1126 09:54:32.502417 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff\": container with ID starting with 740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff not found: ID does not exist" containerID="740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff"
Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.502559 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff"} err="failed to get container status \"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff\": rpc error: code = NotFound desc = could not find container \"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff\": container with ID starting with 740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff not found: ID does not exist"
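The three "DeleteContainer returned error" entries above are gRPC NotFound responses from the CRI runtime: the containers were already gone when the kubelet tried to remove them again, so the errors are harmless. A minimal Go sketch of treating NotFound as an idempotent-delete success follows; removeContainer here is a hypothetical stub standing in for a CRI RemoveContainer call.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer is a hypothetical stub that always reports the
// container as already missing, like the runtime does in the log.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

// deleteIdempotent treats gRPC NotFound as success: if the runtime
// has already forgotten the container, the delete goal is met.
func deleteIdempotent(id string) error {
	err := removeContainer(id)
	if err == nil || status.Code(err) == codes.NotFound {
		return nil
	}
	return fmt.Errorf("removing %s: %w", id, err)
}

func main() {
	if err := deleteIdempotent("5d98693e"); err != nil {
		fmt.Println("unexpected:", err)
	} else {
		fmt.Println("container gone (already-removed is fine)")
	}
}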
err="rpc error: code = NotFound desc = could not find container \"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff\": container with ID starting with 740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff not found: ID does not exist" containerID="740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff" Nov 26 09:54:32 crc kubenswrapper[4940]: I1126 09:54:32.502559 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff"} err="failed to get container status \"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff\": rpc error: code = NotFound desc = could not find container \"740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff\": container with ID starting with 740c1adfa57dcf57965db1b9fce7c8c378c975b999056cf35019fc20f1bfbbff not found: ID does not exist" Nov 26 09:54:33 crc kubenswrapper[4940]: I1126 09:54:33.185924 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" path="/var/lib/kubelet/pods/547fa41b-e3d2-4003-baf8-81a9f572a4a4/volumes" Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.829184 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 26 09:55:57 crc kubenswrapper[4940]: E1126 09:55:57.830375 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="registry-server" Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.830396 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="registry-server" Nov 26 09:55:57 crc kubenswrapper[4940]: E1126 09:55:57.830426 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="extract-utilities" Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.830434 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="extract-utilities" Nov 26 09:55:57 crc kubenswrapper[4940]: E1126 09:55:57.830447 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="extract-content" Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.830455 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="extract-content" Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.830723 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="547fa41b-e3d2-4003-baf8-81a9f572a4a4" containerName="registry-server" Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.831705 4940 util.go:30] "No sandbox for pod can be found. 
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.835469 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.835839 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.836009 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.836188 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-krr5d"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.839634 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.934730 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.934817 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.934858 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgc6v\" (UniqueName: \"kubernetes.io/projected/85a497f3-6d48-4234-92ac-98a55aa14977-kube-api-access-fgc6v\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.934903 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.934934 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.934958 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest"
Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.934978 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-config-data\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest"
\"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-config-data\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.935013 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:57 crc kubenswrapper[4940]: I1126 09:55:57.935078 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037200 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037276 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037313 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037345 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-config-data\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037397 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037447 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037490 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: 
\"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037550 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.037587 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgc6v\" (UniqueName: \"kubernetes.io/projected/85a497f3-6d48-4234-92ac-98a55aa14977-kube-api-access-fgc6v\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.038047 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.038085 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.038733 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.038851 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-config-data\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.039262 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.043237 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.044665 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " 
pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.055767 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.059961 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgc6v\" (UniqueName: \"kubernetes.io/projected/85a497f3-6d48-4234-92ac-98a55aa14977-kube-api-access-fgc6v\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.066429 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.147856 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 26 09:55:58 crc kubenswrapper[4940]: I1126 09:55:58.698654 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 26 09:55:59 crc kubenswrapper[4940]: I1126 09:55:59.559731 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"85a497f3-6d48-4234-92ac-98a55aa14977","Type":"ContainerStarted","Data":"e596beed55ede272610e900dd13de7966a48937703e575c62c60efa962b08a27"} Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.181209 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wj6wx"] Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.191567 4940 util.go:30] "No sandbox for pod can be found. 
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.237489 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wj6wx"]
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.298673 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-catalog-content\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.298795 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq59n\" (UniqueName: \"kubernetes.io/projected/f7de20a9-e094-4b79-81c0-f0ac759672da-kube-api-access-fq59n\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.298943 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-utilities\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.400500 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-utilities\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.400601 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-catalog-content\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.400696 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq59n\" (UniqueName: \"kubernetes.io/projected/f7de20a9-e094-4b79-81c0-f0ac759672da-kube-api-access-fq59n\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.401109 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-catalog-content\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.401360 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-utilities\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.419679 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq59n\" (UniqueName: \"kubernetes.io/projected/f7de20a9-e094-4b79-81c0-f0ac759672da-kube-api-access-fq59n\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx"
"MountVolume.SetUp succeeded for volume \"kube-api-access-fq59n\" (UniqueName: \"kubernetes.io/projected/f7de20a9-e094-4b79-81c0-f0ac759672da-kube-api-access-fq59n\") pod \"certified-operators-wj6wx\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") " pod="openshift-marketplace/certified-operators-wj6wx" Nov 26 09:56:12 crc kubenswrapper[4940]: I1126 09:56:12.561353 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wj6wx" Nov 26 09:56:13 crc kubenswrapper[4940]: I1126 09:56:13.216612 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wj6wx"] Nov 26 09:56:13 crc kubenswrapper[4940]: I1126 09:56:13.736118 4940 generic.go:334] "Generic (PLEG): container finished" podID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerID="510d5feeaa2cdf408adbcc6e6b6ca44329d612625f9fc5de9322886910733f38" exitCode=0 Nov 26 09:56:13 crc kubenswrapper[4940]: I1126 09:56:13.736156 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj6wx" event={"ID":"f7de20a9-e094-4b79-81c0-f0ac759672da","Type":"ContainerDied","Data":"510d5feeaa2cdf408adbcc6e6b6ca44329d612625f9fc5de9322886910733f38"} Nov 26 09:56:13 crc kubenswrapper[4940]: I1126 09:56:13.736180 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj6wx" event={"ID":"f7de20a9-e094-4b79-81c0-f0ac759672da","Type":"ContainerStarted","Data":"78ec49bc7dd692d867f8d79a85b46aa685f437c6078b888d6f1212e6760c7266"} Nov 26 09:56:15 crc kubenswrapper[4940]: I1126 09:56:15.760149 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj6wx" event={"ID":"f7de20a9-e094-4b79-81c0-f0ac759672da","Type":"ContainerStarted","Data":"fbc77d0ee92d8212ef02af5400de8ee0854ba8d32a6a47a71de4c24971afb1db"} Nov 26 09:56:17 crc kubenswrapper[4940]: I1126 09:56:17.823457 4940 generic.go:334] "Generic (PLEG): container finished" podID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerID="fbc77d0ee92d8212ef02af5400de8ee0854ba8d32a6a47a71de4c24971afb1db" exitCode=0 Nov 26 09:56:17 crc kubenswrapper[4940]: I1126 09:56:17.823784 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj6wx" event={"ID":"f7de20a9-e094-4b79-81c0-f0ac759672da","Type":"ContainerDied","Data":"fbc77d0ee92d8212ef02af5400de8ee0854ba8d32a6a47a71de4c24971afb1db"} Nov 26 09:56:19 crc kubenswrapper[4940]: I1126 09:56:19.851085 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj6wx" event={"ID":"f7de20a9-e094-4b79-81c0-f0ac759672da","Type":"ContainerStarted","Data":"82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3"} Nov 26 09:56:19 crc kubenswrapper[4940]: I1126 09:56:19.876966 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wj6wx" podStartSLOduration=3.050232928 podStartE2EDuration="7.876942907s" podCreationTimestamp="2025-11-26 09:56:12 +0000 UTC" firstStartedPulling="2025-11-26 09:56:13.738182454 +0000 UTC m=+10875.258324093" lastFinishedPulling="2025-11-26 09:56:18.564892453 +0000 UTC m=+10880.085034072" observedRunningTime="2025-11-26 09:56:19.876326357 +0000 UTC m=+10881.396467996" watchObservedRunningTime="2025-11-26 09:56:19.876942907 +0000 UTC m=+10881.397084526" Nov 26 09:56:22 crc kubenswrapper[4940]: I1126 09:56:22.562203 4940 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/certified-operators-wj6wx" Nov 26 09:56:22 crc kubenswrapper[4940]: I1126 09:56:22.562538 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wj6wx" Nov 26 09:56:22 crc kubenswrapper[4940]: I1126 09:56:22.627695 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wj6wx" Nov 26 09:56:32 crc kubenswrapper[4940]: I1126 09:56:32.617655 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wj6wx" Nov 26 09:56:32 crc kubenswrapper[4940]: I1126 09:56:32.672910 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wj6wx"] Nov 26 09:56:33 crc kubenswrapper[4940]: I1126 09:56:33.011297 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wj6wx" podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="registry-server" containerID="cri-o://82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3" gracePeriod=2 Nov 26 09:56:34 crc kubenswrapper[4940]: I1126 09:56:34.025007 4940 generic.go:334] "Generic (PLEG): container finished" podID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerID="82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3" exitCode=0 Nov 26 09:56:34 crc kubenswrapper[4940]: I1126 09:56:34.025211 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj6wx" event={"ID":"f7de20a9-e094-4b79-81c0-f0ac759672da","Type":"ContainerDied","Data":"82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3"} Nov 26 09:56:42 crc kubenswrapper[4940]: E1126 09:56:42.562677 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3 is running failed: container process not found" containerID="82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 09:56:42 crc kubenswrapper[4940]: E1126 09:56:42.563553 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3 is running failed: container process not found" containerID="82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 09:56:42 crc kubenswrapper[4940]: E1126 09:56:42.564587 4940 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3 is running failed: container process not found" containerID="82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3" cmd=["grpc_health_probe","-addr=:50051"] Nov 26 09:56:42 crc kubenswrapper[4940]: E1126 09:56:42.564621 4940 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-wj6wx" 
podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="registry-server" Nov 26 09:56:46 crc kubenswrapper[4940]: E1126 09:56:46.467293 4940 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:1f5c0439f2433cb462b222a5bb23e629" Nov 26 09:56:46 crc kubenswrapper[4940]: E1126 09:56:46.467885 4940 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:1f5c0439f2433cb462b222a5bb23e629" Nov 26 09:56:46 crc kubenswrapper[4940]: E1126 09:56:46.468116 4940 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:1f5c0439f2433cb462b222a5bb23e629,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fgc6v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&Config
Nov 26 09:56:46 crc kubenswrapper[4940]: E1126 09:56:46.469380 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="85a497f3-6d48-4234-92ac-98a55aa14977"
Nov 26 09:56:46 crc kubenswrapper[4940]: I1126 09:56:46.795088 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wj6wx"
Nov 26 09:56:46 crc kubenswrapper[4940]: I1126 09:56:46.953852 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fq59n\" (UniqueName: \"kubernetes.io/projected/f7de20a9-e094-4b79-81c0-f0ac759672da-kube-api-access-fq59n\") pod \"f7de20a9-e094-4b79-81c0-f0ac759672da\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") "
Nov 26 09:56:46 crc kubenswrapper[4940]: I1126 09:56:46.954158 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-utilities\") pod \"f7de20a9-e094-4b79-81c0-f0ac759672da\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") "
Nov 26 09:56:46 crc kubenswrapper[4940]: I1126 09:56:46.954284 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-catalog-content\") pod \"f7de20a9-e094-4b79-81c0-f0ac759672da\" (UID: \"f7de20a9-e094-4b79-81c0-f0ac759672da\") "
Nov 26 09:56:46 crc kubenswrapper[4940]: I1126 09:56:46.956228 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-utilities" (OuterVolumeSpecName: "utilities") pod "f7de20a9-e094-4b79-81c0-f0ac759672da" (UID: "f7de20a9-e094-4b79-81c0-f0ac759672da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:56:46 crc kubenswrapper[4940]: I1126 09:56:46.963269 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7de20a9-e094-4b79-81c0-f0ac759672da-kube-api-access-fq59n" (OuterVolumeSpecName: "kube-api-access-fq59n") pod "f7de20a9-e094-4b79-81c0-f0ac759672da" (UID: "f7de20a9-e094-4b79-81c0-f0ac759672da"). InnerVolumeSpecName "kube-api-access-fq59n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.023170 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7de20a9-e094-4b79-81c0-f0ac759672da" (UID: "f7de20a9-e094-4b79-81c0-f0ac759672da"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.057282 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fq59n\" (UniqueName: \"kubernetes.io/projected/f7de20a9-e094-4b79-81c0-f0ac759672da-kube-api-access-fq59n\") on node \"crc\" DevicePath \"\"" Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.057314 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.057324 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7de20a9-e094-4b79-81c0-f0ac759672da-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.208715 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj6wx" event={"ID":"f7de20a9-e094-4b79-81c0-f0ac759672da","Type":"ContainerDied","Data":"78ec49bc7dd692d867f8d79a85b46aa685f437c6078b888d6f1212e6760c7266"} Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.208763 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wj6wx" Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.208808 4940 scope.go:117] "RemoveContainer" containerID="82bf0b6961ffce168cd81a3f2303f46441348f2565b2cd42d4a484f1330f0af3" Nov 26 09:56:47 crc kubenswrapper[4940]: E1126 09:56:47.211904 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:1f5c0439f2433cb462b222a5bb23e629\\\"\"" pod="openstack/tempest-tests-tempest" podUID="85a497f3-6d48-4234-92ac-98a55aa14977" Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.250541 4940 scope.go:117] "RemoveContainer" containerID="fbc77d0ee92d8212ef02af5400de8ee0854ba8d32a6a47a71de4c24971afb1db" Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.289806 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wj6wx"] Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.304599 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wj6wx"] Nov 26 09:56:47 crc kubenswrapper[4940]: I1126 09:56:47.312923 4940 scope.go:117] "RemoveContainer" containerID="510d5feeaa2cdf408adbcc6e6b6ca44329d612625f9fc5de9322886910733f38" Nov 26 09:56:49 crc kubenswrapper[4940]: I1126 09:56:49.181989 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" path="/var/lib/kubelet/pods/f7de20a9-e094-4b79-81c0-f0ac759672da/volumes" Nov 26 09:56:51 crc kubenswrapper[4940]: I1126 09:56:51.728935 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:56:51 crc kubenswrapper[4940]: I1126 09:56:51.729450 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:56:58 crc kubenswrapper[4940]: I1126 09:56:58.313592 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 26 09:56:59 crc kubenswrapper[4940]: I1126 09:56:59.360670 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"85a497f3-6d48-4234-92ac-98a55aa14977","Type":"ContainerStarted","Data":"376fd20da22a69bc242ba64e1fdad8e4863d34be7c45b5764cf15f2ba41c2eb3"} Nov 26 09:56:59 crc kubenswrapper[4940]: I1126 09:56:59.391455 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.788239558 podStartE2EDuration="1m3.391439318s" podCreationTimestamp="2025-11-26 09:55:56 +0000 UTC" firstStartedPulling="2025-11-26 09:55:58.707590144 +0000 UTC m=+10860.227731773" lastFinishedPulling="2025-11-26 09:56:58.310789914 +0000 UTC m=+10919.830931533" observedRunningTime="2025-11-26 09:56:59.382402642 +0000 UTC m=+10920.902544251" watchObservedRunningTime="2025-11-26 09:56:59.391439318 +0000 UTC m=+10920.911580937" Nov 26 09:57:21 crc kubenswrapper[4940]: I1126 09:57:21.729239 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:57:21 crc kubenswrapper[4940]: I1126 09:57:21.729953 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.711555 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qr8xg"] Nov 26 09:57:36 crc kubenswrapper[4940]: E1126 09:57:36.712398 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="extract-content" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.712411 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="extract-content" Nov 26 09:57:36 crc kubenswrapper[4940]: E1126 09:57:36.712454 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="registry-server" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.712460 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="registry-server" Nov 26 09:57:36 crc kubenswrapper[4940]: E1126 09:57:36.712481 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="extract-utilities" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.712487 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="extract-utilities" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.712664 4940 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f7de20a9-e094-4b79-81c0-f0ac759672da" containerName="registry-server" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.714200 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.744757 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qr8xg"] Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.860771 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-utilities\") pod \"redhat-marketplace-qr8xg\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.860839 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jphxl\" (UniqueName: \"kubernetes.io/projected/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-kube-api-access-jphxl\") pod \"redhat-marketplace-qr8xg\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.861110 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-catalog-content\") pod \"redhat-marketplace-qr8xg\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.963457 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-catalog-content\") pod \"redhat-marketplace-qr8xg\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.963576 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-utilities\") pod \"redhat-marketplace-qr8xg\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.963603 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jphxl\" (UniqueName: \"kubernetes.io/projected/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-kube-api-access-jphxl\") pod \"redhat-marketplace-qr8xg\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.964414 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-catalog-content\") pod \"redhat-marketplace-qr8xg\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.964529 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-utilities\") pod \"redhat-marketplace-qr8xg\" (UID: 
\"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:36 crc kubenswrapper[4940]: I1126 09:57:36.980717 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jphxl\" (UniqueName: \"kubernetes.io/projected/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-kube-api-access-jphxl\") pod \"redhat-marketplace-qr8xg\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.046837 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.316207 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gmf47"] Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.318597 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.327356 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gmf47"] Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.473714 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-catalog-content\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.474431 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwhdx\" (UniqueName: \"kubernetes.io/projected/6246f9ae-0897-480c-b349-9b3071fceeb1-kube-api-access-kwhdx\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.474518 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-utilities\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.506065 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qr8xg"] Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.576897 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwhdx\" (UniqueName: \"kubernetes.io/projected/6246f9ae-0897-480c-b349-9b3071fceeb1-kube-api-access-kwhdx\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.576941 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-utilities\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.577012 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-catalog-content\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.578592 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-utilities\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.578655 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-catalog-content\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.595923 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwhdx\" (UniqueName: \"kubernetes.io/projected/6246f9ae-0897-480c-b349-9b3071fceeb1-kube-api-access-kwhdx\") pod \"community-operators-gmf47\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.655472 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.852899 4940 generic.go:334] "Generic (PLEG): container finished" podID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerID="566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284" exitCode=0 Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.853173 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qr8xg" event={"ID":"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5","Type":"ContainerDied","Data":"566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284"} Nov 26 09:57:37 crc kubenswrapper[4940]: I1126 09:57:37.853201 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qr8xg" event={"ID":"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5","Type":"ContainerStarted","Data":"d6ffc7750fac2e77a97447980171bbaaa4e3c4f99b878ea78ef3406ec58855f5"} Nov 26 09:57:38 crc kubenswrapper[4940]: I1126 09:57:38.154732 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gmf47"] Nov 26 09:57:38 crc kubenswrapper[4940]: I1126 09:57:38.873117 4940 generic.go:334] "Generic (PLEG): container finished" podID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerID="4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2" exitCode=0 Nov 26 09:57:38 crc kubenswrapper[4940]: I1126 09:57:38.873723 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmf47" event={"ID":"6246f9ae-0897-480c-b349-9b3071fceeb1","Type":"ContainerDied","Data":"4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2"} Nov 26 09:57:38 crc kubenswrapper[4940]: I1126 09:57:38.873796 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmf47" 
event={"ID":"6246f9ae-0897-480c-b349-9b3071fceeb1","Type":"ContainerStarted","Data":"d8fdf7aeb91c8aefb3085dc5da1ba39aff8ea0b615aaaf12c6d97750fd61fb53"} Nov 26 09:57:38 crc kubenswrapper[4940]: I1126 09:57:38.878619 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qr8xg" event={"ID":"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5","Type":"ContainerStarted","Data":"f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193"} Nov 26 09:57:39 crc kubenswrapper[4940]: I1126 09:57:39.891054 4940 generic.go:334] "Generic (PLEG): container finished" podID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerID="f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193" exitCode=0 Nov 26 09:57:39 crc kubenswrapper[4940]: I1126 09:57:39.891101 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qr8xg" event={"ID":"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5","Type":"ContainerDied","Data":"f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193"} Nov 26 09:57:40 crc kubenswrapper[4940]: I1126 09:57:40.909670 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmf47" event={"ID":"6246f9ae-0897-480c-b349-9b3071fceeb1","Type":"ContainerStarted","Data":"4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb"} Nov 26 09:57:41 crc kubenswrapper[4940]: I1126 09:57:41.920703 4940 generic.go:334] "Generic (PLEG): container finished" podID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerID="4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb" exitCode=0 Nov 26 09:57:41 crc kubenswrapper[4940]: I1126 09:57:41.920788 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmf47" event={"ID":"6246f9ae-0897-480c-b349-9b3071fceeb1","Type":"ContainerDied","Data":"4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb"} Nov 26 09:57:41 crc kubenswrapper[4940]: I1126 09:57:41.925713 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qr8xg" event={"ID":"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5","Type":"ContainerStarted","Data":"67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af"} Nov 26 09:57:41 crc kubenswrapper[4940]: I1126 09:57:41.960977 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qr8xg" podStartSLOduration=2.20166742 podStartE2EDuration="5.960955805s" podCreationTimestamp="2025-11-26 09:57:36 +0000 UTC" firstStartedPulling="2025-11-26 09:57:37.857311453 +0000 UTC m=+10959.377453072" lastFinishedPulling="2025-11-26 09:57:41.616599828 +0000 UTC m=+10963.136741457" observedRunningTime="2025-11-26 09:57:41.955036957 +0000 UTC m=+10963.475178576" watchObservedRunningTime="2025-11-26 09:57:41.960955805 +0000 UTC m=+10963.481097424" Nov 26 09:57:42 crc kubenswrapper[4940]: I1126 09:57:42.939108 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmf47" event={"ID":"6246f9ae-0897-480c-b349-9b3071fceeb1","Type":"ContainerStarted","Data":"4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999"} Nov 26 09:57:42 crc kubenswrapper[4940]: I1126 09:57:42.957591 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gmf47" podStartSLOduration=2.356825772 podStartE2EDuration="5.957573441s" podCreationTimestamp="2025-11-26 09:57:37 +0000 UTC" 
firstStartedPulling="2025-11-26 09:57:38.879405217 +0000 UTC m=+10960.399546836" lastFinishedPulling="2025-11-26 09:57:42.480152886 +0000 UTC m=+10964.000294505" observedRunningTime="2025-11-26 09:57:42.955103162 +0000 UTC m=+10964.475244781" watchObservedRunningTime="2025-11-26 09:57:42.957573441 +0000 UTC m=+10964.477715060" Nov 26 09:57:47 crc kubenswrapper[4940]: I1126 09:57:47.047787 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:47 crc kubenswrapper[4940]: I1126 09:57:47.048292 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:47 crc kubenswrapper[4940]: I1126 09:57:47.656549 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:47 crc kubenswrapper[4940]: I1126 09:57:47.656599 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:47 crc kubenswrapper[4940]: I1126 09:57:47.763596 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:48 crc kubenswrapper[4940]: I1126 09:57:48.085204 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:48 crc kubenswrapper[4940]: I1126 09:57:48.114398 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-qr8xg" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="registry-server" probeResult="failure" output=< Nov 26 09:57:48 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 09:57:48 crc kubenswrapper[4940]: > Nov 26 09:57:48 crc kubenswrapper[4940]: I1126 09:57:48.147484 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gmf47"] Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.019214 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gmf47" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerName="registry-server" containerID="cri-o://4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999" gracePeriod=2 Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.679839 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.762485 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-utilities\") pod \"6246f9ae-0897-480c-b349-9b3071fceeb1\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.762713 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwhdx\" (UniqueName: \"kubernetes.io/projected/6246f9ae-0897-480c-b349-9b3071fceeb1-kube-api-access-kwhdx\") pod \"6246f9ae-0897-480c-b349-9b3071fceeb1\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.762786 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-catalog-content\") pod \"6246f9ae-0897-480c-b349-9b3071fceeb1\" (UID: \"6246f9ae-0897-480c-b349-9b3071fceeb1\") " Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.764287 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-utilities" (OuterVolumeSpecName: "utilities") pod "6246f9ae-0897-480c-b349-9b3071fceeb1" (UID: "6246f9ae-0897-480c-b349-9b3071fceeb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.764398 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.779255 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6246f9ae-0897-480c-b349-9b3071fceeb1-kube-api-access-kwhdx" (OuterVolumeSpecName: "kube-api-access-kwhdx") pod "6246f9ae-0897-480c-b349-9b3071fceeb1" (UID: "6246f9ae-0897-480c-b349-9b3071fceeb1"). InnerVolumeSpecName "kube-api-access-kwhdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.826210 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6246f9ae-0897-480c-b349-9b3071fceeb1" (UID: "6246f9ae-0897-480c-b349-9b3071fceeb1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.866662 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwhdx\" (UniqueName: \"kubernetes.io/projected/6246f9ae-0897-480c-b349-9b3071fceeb1-kube-api-access-kwhdx\") on node \"crc\" DevicePath \"\"" Nov 26 09:57:50 crc kubenswrapper[4940]: I1126 09:57:50.866695 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6246f9ae-0897-480c-b349-9b3071fceeb1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.029915 4940 generic.go:334] "Generic (PLEG): container finished" podID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerID="4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999" exitCode=0 Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.029958 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmf47" event={"ID":"6246f9ae-0897-480c-b349-9b3071fceeb1","Type":"ContainerDied","Data":"4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999"} Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.029984 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gmf47" event={"ID":"6246f9ae-0897-480c-b349-9b3071fceeb1","Type":"ContainerDied","Data":"d8fdf7aeb91c8aefb3085dc5da1ba39aff8ea0b615aaaf12c6d97750fd61fb53"} Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.030000 4940 scope.go:117] "RemoveContainer" containerID="4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.030081 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gmf47" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.056594 4940 scope.go:117] "RemoveContainer" containerID="4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.067005 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gmf47"] Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.077645 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gmf47"] Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.083161 4940 scope.go:117] "RemoveContainer" containerID="4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.124353 4940 scope.go:117] "RemoveContainer" containerID="4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999" Nov 26 09:57:51 crc kubenswrapper[4940]: E1126 09:57:51.124825 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999\": container with ID starting with 4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999 not found: ID does not exist" containerID="4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.124871 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999"} err="failed to get container status \"4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999\": rpc error: code = NotFound desc = could not find container \"4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999\": container with ID starting with 4954638f3296dce411dfba89ba063100e05105f240ed494efb74f9a0d0107999 not found: ID does not exist" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.124899 4940 scope.go:117] "RemoveContainer" containerID="4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb" Nov 26 09:57:51 crc kubenswrapper[4940]: E1126 09:57:51.125645 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb\": container with ID starting with 4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb not found: ID does not exist" containerID="4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.125700 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb"} err="failed to get container status \"4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb\": rpc error: code = NotFound desc = could not find container \"4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb\": container with ID starting with 4d9af6b1cc8b3a3b385503a56f850cca8a5377396876db1c77876eee3b263fcb not found: ID does not exist" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.125730 4940 scope.go:117] "RemoveContainer" containerID="4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2" Nov 26 09:57:51 crc kubenswrapper[4940]: E1126 09:57:51.126029 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2\": container with ID starting with 4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2 not found: ID does not exist" containerID="4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.126079 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2"} err="failed to get container status \"4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2\": rpc error: code = NotFound desc = could not find container \"4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2\": container with ID starting with 4e8f362046fa3b39a390586ca0187d512cc33eccd61466e6120163c41330bfb2 not found: ID does not exist" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.179518 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" path="/var/lib/kubelet/pods/6246f9ae-0897-480c-b349-9b3071fceeb1/volumes" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.728191 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.728508 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.728548 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.729379 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 09:57:51 crc kubenswrapper[4940]: I1126 09:57:51.729439 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" gracePeriod=600 Nov 26 09:57:51 crc kubenswrapper[4940]: E1126 09:57:51.863252 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:57:52 crc kubenswrapper[4940]: I1126 09:57:52.041982 4940 generic.go:334] 
"Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" exitCode=0 Nov 26 09:57:52 crc kubenswrapper[4940]: I1126 09:57:52.042258 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc"} Nov 26 09:57:52 crc kubenswrapper[4940]: I1126 09:57:52.042416 4940 scope.go:117] "RemoveContainer" containerID="f4cd5bdaccff8541f7cd7902ad4e5c002312bb69798855d3e140b16f99d10c44" Nov 26 09:57:52 crc kubenswrapper[4940]: I1126 09:57:52.043215 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:57:52 crc kubenswrapper[4940]: E1126 09:57:52.043551 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:57:57 crc kubenswrapper[4940]: I1126 09:57:57.108360 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:57 crc kubenswrapper[4940]: I1126 09:57:57.161266 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:57 crc kubenswrapper[4940]: I1126 09:57:57.345309 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qr8xg"] Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.123776 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qr8xg" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="registry-server" containerID="cri-o://67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af" gracePeriod=2 Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.748508 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.846824 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-catalog-content\") pod \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.846919 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-utilities\") pod \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.847066 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jphxl\" (UniqueName: \"kubernetes.io/projected/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-kube-api-access-jphxl\") pod \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\" (UID: \"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5\") " Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.848002 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-utilities" (OuterVolumeSpecName: "utilities") pod "81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" (UID: "81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.853688 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-kube-api-access-jphxl" (OuterVolumeSpecName: "kube-api-access-jphxl") pod "81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" (UID: "81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5"). InnerVolumeSpecName "kube-api-access-jphxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.868356 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" (UID: "81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.949150 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jphxl\" (UniqueName: \"kubernetes.io/projected/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-kube-api-access-jphxl\") on node \"crc\" DevicePath \"\"" Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.949189 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:57:59 crc kubenswrapper[4940]: I1126 09:57:59.949202 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.135847 4940 generic.go:334] "Generic (PLEG): container finished" podID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerID="67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af" exitCode=0 Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.135898 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qr8xg" event={"ID":"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5","Type":"ContainerDied","Data":"67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af"} Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.135937 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qr8xg" event={"ID":"81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5","Type":"ContainerDied","Data":"d6ffc7750fac2e77a97447980171bbaaa4e3c4f99b878ea78ef3406ec58855f5"} Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.135959 4940 scope.go:117] "RemoveContainer" containerID="67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.135953 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qr8xg" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.159325 4940 scope.go:117] "RemoveContainer" containerID="f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.182688 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qr8xg"] Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.196757 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qr8xg"] Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.206122 4940 scope.go:117] "RemoveContainer" containerID="566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.257436 4940 scope.go:117] "RemoveContainer" containerID="67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af" Nov 26 09:58:00 crc kubenswrapper[4940]: E1126 09:58:00.260346 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af\": container with ID starting with 67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af not found: ID does not exist" containerID="67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.260381 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af"} err="failed to get container status \"67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af\": rpc error: code = NotFound desc = could not find container \"67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af\": container with ID starting with 67eba81116067965263b66bbcb5197a4205df3bb2d1284c856dd1f8edb2e34af not found: ID does not exist" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.260405 4940 scope.go:117] "RemoveContainer" containerID="f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193" Nov 26 09:58:00 crc kubenswrapper[4940]: E1126 09:58:00.260673 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193\": container with ID starting with f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193 not found: ID does not exist" containerID="f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.260696 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193"} err="failed to get container status \"f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193\": rpc error: code = NotFound desc = could not find container \"f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193\": container with ID starting with f74cfb5170668a988965471b7ac77f3b262679153c5c9ced9b6958036673b193 not found: ID does not exist" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.260708 4940 scope.go:117] "RemoveContainer" containerID="566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284" Nov 26 09:58:00 crc kubenswrapper[4940]: E1126 09:58:00.260976 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284\": container with ID starting with 566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284 not found: ID does not exist" containerID="566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284" Nov 26 09:58:00 crc kubenswrapper[4940]: I1126 09:58:00.260994 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284"} err="failed to get container status \"566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284\": rpc error: code = NotFound desc = could not find container \"566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284\": container with ID starting with 566a15ded26e7e06672bc5578d76e0087cc26c9b2034f63b416fba38f05db284 not found: ID does not exist" Nov 26 09:58:01 crc kubenswrapper[4940]: I1126 09:58:01.176954 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" path="/var/lib/kubelet/pods/81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5/volumes" Nov 26 09:58:07 crc kubenswrapper[4940]: I1126 09:58:07.165980 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:58:07 crc kubenswrapper[4940]: E1126 09:58:07.166537 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:58:19 crc kubenswrapper[4940]: I1126 09:58:19.173477 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:58:19 crc kubenswrapper[4940]: E1126 09:58:19.174324 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:58:30 crc kubenswrapper[4940]: I1126 09:58:30.165742 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:58:30 crc kubenswrapper[4940]: E1126 09:58:30.166392 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:58:43 crc kubenswrapper[4940]: I1126 09:58:43.165965 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:58:43 crc kubenswrapper[4940]: E1126 09:58:43.166786 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:58:58 crc kubenswrapper[4940]: I1126 09:58:58.165981 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:58:58 crc kubenswrapper[4940]: E1126 09:58:58.166628 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:59:09 crc kubenswrapper[4940]: I1126 09:59:09.172574 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:59:09 crc kubenswrapper[4940]: E1126 09:59:09.173348 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:59:20 crc kubenswrapper[4940]: I1126 09:59:20.166384 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:59:20 crc kubenswrapper[4940]: E1126 09:59:20.167303 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:59:31 crc kubenswrapper[4940]: I1126 09:59:31.165482 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:59:31 crc kubenswrapper[4940]: E1126 09:59:31.166174 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 09:59:46 crc kubenswrapper[4940]: I1126 09:59:46.165627 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 09:59:46 crc kubenswrapper[4940]: E1126 09:59:46.166549 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.155770 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss"] Nov 26 10:00:00 crc kubenswrapper[4940]: E1126 10:00:00.157287 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerName="extract-content" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.157317 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerName="extract-content" Nov 26 10:00:00 crc kubenswrapper[4940]: E1126 10:00:00.157389 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerName="registry-server" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.157406 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerName="registry-server" Nov 26 10:00:00 crc kubenswrapper[4940]: E1126 10:00:00.157441 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="registry-server" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.157457 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="registry-server" Nov 26 10:00:00 crc kubenswrapper[4940]: E1126 10:00:00.157509 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="extract-utilities" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.157527 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="extract-utilities" Nov 26 10:00:00 crc kubenswrapper[4940]: E1126 10:00:00.157587 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerName="extract-utilities" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.157603 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerName="extract-utilities" Nov 26 10:00:00 crc kubenswrapper[4940]: E1126 10:00:00.157661 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="extract-content" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.157701 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="extract-content" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.158096 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="81fbab3d-55c8-4a8d-bb3b-6b44e6f96ad5" containerName="registry-server" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.158176 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6246f9ae-0897-480c-b349-9b3071fceeb1" containerName="registry-server" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.159431 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.161932 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.163199 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.173942 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss"] Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.281870 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fdc0b40e-5cca-4c1e-beed-7c387dac378d-secret-volume\") pod \"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.282031 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fdc0b40e-5cca-4c1e-beed-7c387dac378d-config-volume\") pod \"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.282207 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm6nb\" (UniqueName: \"kubernetes.io/projected/fdc0b40e-5cca-4c1e-beed-7c387dac378d-kube-api-access-vm6nb\") pod \"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.384014 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fdc0b40e-5cca-4c1e-beed-7c387dac378d-config-volume\") pod \"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.384189 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm6nb\" (UniqueName: \"kubernetes.io/projected/fdc0b40e-5cca-4c1e-beed-7c387dac378d-kube-api-access-vm6nb\") pod \"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.384367 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fdc0b40e-5cca-4c1e-beed-7c387dac378d-secret-volume\") pod \"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.385058 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fdc0b40e-5cca-4c1e-beed-7c387dac378d-config-volume\") pod 
\"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.394710 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fdc0b40e-5cca-4c1e-beed-7c387dac378d-secret-volume\") pod \"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.417690 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm6nb\" (UniqueName: \"kubernetes.io/projected/fdc0b40e-5cca-4c1e-beed-7c387dac378d-kube-api-access-vm6nb\") pod \"collect-profiles-29402520-7crss\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.503477 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:00 crc kubenswrapper[4940]: I1126 10:00:00.997342 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss"] Nov 26 10:00:01 crc kubenswrapper[4940]: W1126 10:00:01.031977 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfdc0b40e_5cca_4c1e_beed_7c387dac378d.slice/crio-feebeca42171c04c2428ff5ea0e1072735c57bdd4e982a06995c8fc541c7967d WatchSource:0}: Error finding container feebeca42171c04c2428ff5ea0e1072735c57bdd4e982a06995c8fc541c7967d: Status 404 returned error can't find the container with id feebeca42171c04c2428ff5ea0e1072735c57bdd4e982a06995c8fc541c7967d Nov 26 10:00:01 crc kubenswrapper[4940]: I1126 10:00:01.165430 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:00:01 crc kubenswrapper[4940]: E1126 10:00:01.165765 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:00:01 crc kubenswrapper[4940]: I1126 10:00:01.509019 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" event={"ID":"fdc0b40e-5cca-4c1e-beed-7c387dac378d","Type":"ContainerStarted","Data":"f7980199cd49643d390f8085cf6794cea4be5f13f1993ef582da0e2d09a8af0b"} Nov 26 10:00:01 crc kubenswrapper[4940]: I1126 10:00:01.509310 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" event={"ID":"fdc0b40e-5cca-4c1e-beed-7c387dac378d","Type":"ContainerStarted","Data":"feebeca42171c04c2428ff5ea0e1072735c57bdd4e982a06995c8fc541c7967d"} Nov 26 10:00:01 crc kubenswrapper[4940]: I1126 10:00:01.528571 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" podStartSLOduration=1.528550911 
podStartE2EDuration="1.528550911s" podCreationTimestamp="2025-11-26 10:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 10:00:01.526899568 +0000 UTC m=+11103.047041177" watchObservedRunningTime="2025-11-26 10:00:01.528550911 +0000 UTC m=+11103.048692530" Nov 26 10:00:02 crc kubenswrapper[4940]: I1126 10:00:02.518702 4940 generic.go:334] "Generic (PLEG): container finished" podID="fdc0b40e-5cca-4c1e-beed-7c387dac378d" containerID="f7980199cd49643d390f8085cf6794cea4be5f13f1993ef582da0e2d09a8af0b" exitCode=0 Nov 26 10:00:02 crc kubenswrapper[4940]: I1126 10:00:02.518803 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" event={"ID":"fdc0b40e-5cca-4c1e-beed-7c387dac378d","Type":"ContainerDied","Data":"f7980199cd49643d390f8085cf6794cea4be5f13f1993ef582da0e2d09a8af0b"} Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.198679 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.371493 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vm6nb\" (UniqueName: \"kubernetes.io/projected/fdc0b40e-5cca-4c1e-beed-7c387dac378d-kube-api-access-vm6nb\") pod \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.372088 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fdc0b40e-5cca-4c1e-beed-7c387dac378d-config-volume\") pod \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.372263 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fdc0b40e-5cca-4c1e-beed-7c387dac378d-secret-volume\") pod \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\" (UID: \"fdc0b40e-5cca-4c1e-beed-7c387dac378d\") " Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.373713 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdc0b40e-5cca-4c1e-beed-7c387dac378d-config-volume" (OuterVolumeSpecName: "config-volume") pod "fdc0b40e-5cca-4c1e-beed-7c387dac378d" (UID: "fdc0b40e-5cca-4c1e-beed-7c387dac378d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.379266 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdc0b40e-5cca-4c1e-beed-7c387dac378d-kube-api-access-vm6nb" (OuterVolumeSpecName: "kube-api-access-vm6nb") pod "fdc0b40e-5cca-4c1e-beed-7c387dac378d" (UID: "fdc0b40e-5cca-4c1e-beed-7c387dac378d"). InnerVolumeSpecName "kube-api-access-vm6nb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.380430 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdc0b40e-5cca-4c1e-beed-7c387dac378d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fdc0b40e-5cca-4c1e-beed-7c387dac378d" (UID: "fdc0b40e-5cca-4c1e-beed-7c387dac378d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.475135 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vm6nb\" (UniqueName: \"kubernetes.io/projected/fdc0b40e-5cca-4c1e-beed-7c387dac378d-kube-api-access-vm6nb\") on node \"crc\" DevicePath \"\"" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.475175 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fdc0b40e-5cca-4c1e-beed-7c387dac378d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.475183 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fdc0b40e-5cca-4c1e-beed-7c387dac378d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.540325 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" event={"ID":"fdc0b40e-5cca-4c1e-beed-7c387dac378d","Type":"ContainerDied","Data":"feebeca42171c04c2428ff5ea0e1072735c57bdd4e982a06995c8fc541c7967d"} Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.540371 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="feebeca42171c04c2428ff5ea0e1072735c57bdd4e982a06995c8fc541c7967d" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.540420 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402520-7crss" Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.604013 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"] Nov 26 10:00:04 crc kubenswrapper[4940]: I1126 10:00:04.615802 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402475-hzbwd"] Nov 26 10:00:05 crc kubenswrapper[4940]: I1126 10:00:05.185422 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8939c50e-fda8-4489-8901-9cd1484b0123" path="/var/lib/kubelet/pods/8939c50e-fda8-4489-8901-9cd1484b0123/volumes" Nov 26 10:00:15 crc kubenswrapper[4940]: I1126 10:00:15.165384 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:00:15 crc kubenswrapper[4940]: E1126 10:00:15.166261 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:00:18 crc kubenswrapper[4940]: I1126 10:00:18.431423 4940 scope.go:117] "RemoveContainer" containerID="82e76cbc1d056bf0cff87bb9993e50109360370b824f052b649f7f2b01d2eb5b" Nov 26 10:00:27 crc kubenswrapper[4940]: I1126 10:00:27.165420 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:00:27 crc kubenswrapper[4940]: E1126 10:00:27.166057 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:00:38 crc kubenswrapper[4940]: I1126 10:00:38.166261 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:00:38 crc kubenswrapper[4940]: E1126 10:00:38.167139 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:00:53 crc kubenswrapper[4940]: I1126 10:00:53.166487 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:00:53 crc kubenswrapper[4940]: E1126 10:00:53.167081 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.165701 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29402521-d622q"] Nov 26 10:01:00 crc kubenswrapper[4940]: E1126 10:01:00.166680 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdc0b40e-5cca-4c1e-beed-7c387dac378d" containerName="collect-profiles" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.166696 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdc0b40e-5cca-4c1e-beed-7c387dac378d" containerName="collect-profiles" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.166890 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdc0b40e-5cca-4c1e-beed-7c387dac378d" containerName="collect-profiles" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.167889 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.180448 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402521-d622q"] Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.256548 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-combined-ca-bundle\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.256678 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc58w\" (UniqueName: \"kubernetes.io/projected/34591905-4a5d-490e-9fb2-2cf409cd3aa0-kube-api-access-bc58w\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.256743 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-config-data\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.256770 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-fernet-keys\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.358127 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc58w\" (UniqueName: \"kubernetes.io/projected/34591905-4a5d-490e-9fb2-2cf409cd3aa0-kube-api-access-bc58w\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.358538 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-config-data\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.358563 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-fernet-keys\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.359760 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-combined-ca-bundle\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.366470 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-fernet-keys\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.367230 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-config-data\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.367814 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-combined-ca-bundle\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.375674 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc58w\" (UniqueName: \"kubernetes.io/projected/34591905-4a5d-490e-9fb2-2cf409cd3aa0-kube-api-access-bc58w\") pod \"keystone-cron-29402521-d622q\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:00 crc kubenswrapper[4940]: I1126 10:01:00.493454 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:01 crc kubenswrapper[4940]: I1126 10:01:01.042970 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402521-d622q"] Nov 26 10:01:01 crc kubenswrapper[4940]: I1126 10:01:01.131974 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402521-d622q" event={"ID":"34591905-4a5d-490e-9fb2-2cf409cd3aa0","Type":"ContainerStarted","Data":"35e878e3bb70276fb3557d599508078a3a06386092e6746926b423915fbb30b1"} Nov 26 10:01:02 crc kubenswrapper[4940]: I1126 10:01:02.142627 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402521-d622q" event={"ID":"34591905-4a5d-490e-9fb2-2cf409cd3aa0","Type":"ContainerStarted","Data":"a2ddae72180737543dc1aa84b409eed6184af973bf8c9825dbdda41627527fc4"} Nov 26 10:01:02 crc kubenswrapper[4940]: I1126 10:01:02.179971 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29402521-d622q" podStartSLOduration=2.179947604 podStartE2EDuration="2.179947604s" podCreationTimestamp="2025-11-26 10:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 10:01:02.173441177 +0000 UTC m=+11163.693582806" watchObservedRunningTime="2025-11-26 10:01:02.179947604 +0000 UTC m=+11163.700089233" Nov 26 10:01:05 crc kubenswrapper[4940]: I1126 10:01:05.412443 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:01:05 crc kubenswrapper[4940]: E1126 10:01:05.415123 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:01:07 crc kubenswrapper[4940]: I1126 10:01:07.212015 4940 generic.go:334] "Generic (PLEG): container finished" podID="34591905-4a5d-490e-9fb2-2cf409cd3aa0" containerID="a2ddae72180737543dc1aa84b409eed6184af973bf8c9825dbdda41627527fc4" exitCode=0 Nov 26 10:01:07 crc kubenswrapper[4940]: I1126 10:01:07.212591 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402521-d622q" event={"ID":"34591905-4a5d-490e-9fb2-2cf409cd3aa0","Type":"ContainerDied","Data":"a2ddae72180737543dc1aa84b409eed6184af973bf8c9825dbdda41627527fc4"} Nov 26 10:01:08 crc kubenswrapper[4940]: I1126 10:01:08.784292 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:08 crc kubenswrapper[4940]: I1126 10:01:08.948513 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-combined-ca-bundle\") pod \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " Nov 26 10:01:08 crc kubenswrapper[4940]: I1126 10:01:08.948559 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-fernet-keys\") pod \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " Nov 26 10:01:08 crc kubenswrapper[4940]: I1126 10:01:08.948658 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-config-data\") pod \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " Nov 26 10:01:08 crc kubenswrapper[4940]: I1126 10:01:08.948713 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc58w\" (UniqueName: \"kubernetes.io/projected/34591905-4a5d-490e-9fb2-2cf409cd3aa0-kube-api-access-bc58w\") pod \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\" (UID: \"34591905-4a5d-490e-9fb2-2cf409cd3aa0\") " Nov 26 10:01:08 crc kubenswrapper[4940]: I1126 10:01:08.955448 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34591905-4a5d-490e-9fb2-2cf409cd3aa0-kube-api-access-bc58w" (OuterVolumeSpecName: "kube-api-access-bc58w") pod "34591905-4a5d-490e-9fb2-2cf409cd3aa0" (UID: "34591905-4a5d-490e-9fb2-2cf409cd3aa0"). InnerVolumeSpecName "kube-api-access-bc58w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:01:08 crc kubenswrapper[4940]: I1126 10:01:08.955992 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "34591905-4a5d-490e-9fb2-2cf409cd3aa0" (UID: "34591905-4a5d-490e-9fb2-2cf409cd3aa0"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 10:01:08 crc kubenswrapper[4940]: I1126 10:01:08.989259 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34591905-4a5d-490e-9fb2-2cf409cd3aa0" (UID: "34591905-4a5d-490e-9fb2-2cf409cd3aa0"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 10:01:09 crc kubenswrapper[4940]: I1126 10:01:09.034164 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-config-data" (OuterVolumeSpecName: "config-data") pod "34591905-4a5d-490e-9fb2-2cf409cd3aa0" (UID: "34591905-4a5d-490e-9fb2-2cf409cd3aa0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 10:01:09 crc kubenswrapper[4940]: I1126 10:01:09.052515 4940 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 10:01:09 crc kubenswrapper[4940]: I1126 10:01:09.052581 4940 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 10:01:09 crc kubenswrapper[4940]: I1126 10:01:09.052594 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34591905-4a5d-490e-9fb2-2cf409cd3aa0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 10:01:09 crc kubenswrapper[4940]: I1126 10:01:09.052606 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc58w\" (UniqueName: \"kubernetes.io/projected/34591905-4a5d-490e-9fb2-2cf409cd3aa0-kube-api-access-bc58w\") on node \"crc\" DevicePath \"\"" Nov 26 10:01:09 crc kubenswrapper[4940]: I1126 10:01:09.232437 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402521-d622q" event={"ID":"34591905-4a5d-490e-9fb2-2cf409cd3aa0","Type":"ContainerDied","Data":"35e878e3bb70276fb3557d599508078a3a06386092e6746926b423915fbb30b1"} Nov 26 10:01:09 crc kubenswrapper[4940]: I1126 10:01:09.232673 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35e878e3bb70276fb3557d599508078a3a06386092e6746926b423915fbb30b1" Nov 26 10:01:09 crc kubenswrapper[4940]: I1126 10:01:09.232705 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29402521-d622q" Nov 26 10:01:19 crc kubenswrapper[4940]: I1126 10:01:19.172905 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:01:19 crc kubenswrapper[4940]: E1126 10:01:19.173714 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:01:34 crc kubenswrapper[4940]: I1126 10:01:34.165770 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:01:34 crc kubenswrapper[4940]: E1126 10:01:34.166450 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:01:49 crc kubenswrapper[4940]: I1126 10:01:49.178964 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:01:49 crc kubenswrapper[4940]: E1126 10:01:49.179790 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:02:03 crc kubenswrapper[4940]: I1126 10:02:03.166342 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:02:03 crc kubenswrapper[4940]: E1126 10:02:03.167424 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:02:17 crc kubenswrapper[4940]: I1126 10:02:17.169650 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:02:17 crc kubenswrapper[4940]: E1126 10:02:17.170569 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:02:28 crc kubenswrapper[4940]: I1126 10:02:28.166387 4940 scope.go:117] "RemoveContainer" 
containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:02:28 crc kubenswrapper[4940]: E1126 10:02:28.167029 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:02:42 crc kubenswrapper[4940]: I1126 10:02:42.165534 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:02:42 crc kubenswrapper[4940]: E1126 10:02:42.166555 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:02:57 crc kubenswrapper[4940]: I1126 10:02:57.166470 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:02:57 crc kubenswrapper[4940]: I1126 10:02:57.506708 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"b417c15488b023593fd6f3c3f99b556cbd22261b9b352b031f84ca58d98aeb04"} Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.030626 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cgmm9"] Nov 26 10:04:50 crc kubenswrapper[4940]: E1126 10:04:50.031792 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34591905-4a5d-490e-9fb2-2cf409cd3aa0" containerName="keystone-cron" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.031823 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="34591905-4a5d-490e-9fb2-2cf409cd3aa0" containerName="keystone-cron" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.032117 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="34591905-4a5d-490e-9fb2-2cf409cd3aa0" containerName="keystone-cron" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.034490 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.040477 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cgmm9"] Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.121414 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-utilities\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.121557 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6lnv\" (UniqueName: \"kubernetes.io/projected/39cc26f8-27b3-4315-aad7-58ef0abefb01-kube-api-access-w6lnv\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.121644 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-catalog-content\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.223604 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6lnv\" (UniqueName: \"kubernetes.io/projected/39cc26f8-27b3-4315-aad7-58ef0abefb01-kube-api-access-w6lnv\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.223740 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-catalog-content\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.223813 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-utilities\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.224403 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-catalog-content\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.224468 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-utilities\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.264458 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-w6lnv\" (UniqueName: \"kubernetes.io/projected/39cc26f8-27b3-4315-aad7-58ef0abefb01-kube-api-access-w6lnv\") pod \"redhat-operators-cgmm9\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.362786 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:04:50 crc kubenswrapper[4940]: I1126 10:04:50.918338 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cgmm9"] Nov 26 10:04:50 crc kubenswrapper[4940]: W1126 10:04:50.920176 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39cc26f8_27b3_4315_aad7_58ef0abefb01.slice/crio-64dd5da575cf0a811dd2d6065f21e369283e684a3914023b49622d29e5816bff WatchSource:0}: Error finding container 64dd5da575cf0a811dd2d6065f21e369283e684a3914023b49622d29e5816bff: Status 404 returned error can't find the container with id 64dd5da575cf0a811dd2d6065f21e369283e684a3914023b49622d29e5816bff Nov 26 10:04:51 crc kubenswrapper[4940]: I1126 10:04:51.860871 4940 generic.go:334] "Generic (PLEG): container finished" podID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerID="32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5" exitCode=0 Nov 26 10:04:51 crc kubenswrapper[4940]: I1126 10:04:51.861012 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgmm9" event={"ID":"39cc26f8-27b3-4315-aad7-58ef0abefb01","Type":"ContainerDied","Data":"32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5"} Nov 26 10:04:51 crc kubenswrapper[4940]: I1126 10:04:51.861177 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgmm9" event={"ID":"39cc26f8-27b3-4315-aad7-58ef0abefb01","Type":"ContainerStarted","Data":"64dd5da575cf0a811dd2d6065f21e369283e684a3914023b49622d29e5816bff"} Nov 26 10:04:51 crc kubenswrapper[4940]: I1126 10:04:51.863399 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 10:04:52 crc kubenswrapper[4940]: I1126 10:04:52.888640 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgmm9" event={"ID":"39cc26f8-27b3-4315-aad7-58ef0abefb01","Type":"ContainerStarted","Data":"949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854"} Nov 26 10:04:57 crc kubenswrapper[4940]: I1126 10:04:57.945665 4940 generic.go:334] "Generic (PLEG): container finished" podID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerID="949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854" exitCode=0 Nov 26 10:04:57 crc kubenswrapper[4940]: I1126 10:04:57.945735 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgmm9" event={"ID":"39cc26f8-27b3-4315-aad7-58ef0abefb01","Type":"ContainerDied","Data":"949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854"} Nov 26 10:04:58 crc kubenswrapper[4940]: I1126 10:04:58.980711 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgmm9" event={"ID":"39cc26f8-27b3-4315-aad7-58ef0abefb01","Type":"ContainerStarted","Data":"2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f"} Nov 26 10:04:59 crc kubenswrapper[4940]: I1126 10:04:59.012768 4940 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-marketplace/redhat-operators-cgmm9" podStartSLOduration=3.206872247 podStartE2EDuration="10.012747171s" podCreationTimestamp="2025-11-26 10:04:49 +0000 UTC" firstStartedPulling="2025-11-26 10:04:51.863208861 +0000 UTC m=+11393.383350480" lastFinishedPulling="2025-11-26 10:04:58.669083775 +0000 UTC m=+11400.189225404" observedRunningTime="2025-11-26 10:04:59.00739123 +0000 UTC m=+11400.527532859" watchObservedRunningTime="2025-11-26 10:04:59.012747171 +0000 UTC m=+11400.532888790" Nov 26 10:05:00 crc kubenswrapper[4940]: I1126 10:05:00.363241 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:05:00 crc kubenswrapper[4940]: I1126 10:05:00.364474 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:05:01 crc kubenswrapper[4940]: I1126 10:05:01.432893 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cgmm9" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="registry-server" probeResult="failure" output=< Nov 26 10:05:01 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 10:05:01 crc kubenswrapper[4940]: > Nov 26 10:05:11 crc kubenswrapper[4940]: I1126 10:05:11.415579 4940 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cgmm9" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="registry-server" probeResult="failure" output=< Nov 26 10:05:11 crc kubenswrapper[4940]: timeout: failed to connect service ":50051" within 1s Nov 26 10:05:11 crc kubenswrapper[4940]: > Nov 26 10:05:20 crc kubenswrapper[4940]: I1126 10:05:20.456592 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:05:20 crc kubenswrapper[4940]: I1126 10:05:20.525166 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:05:21 crc kubenswrapper[4940]: I1126 10:05:21.224923 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cgmm9"] Nov 26 10:05:21 crc kubenswrapper[4940]: I1126 10:05:21.728171 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:05:21 crc kubenswrapper[4940]: I1126 10:05:21.728524 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:05:22 crc kubenswrapper[4940]: I1126 10:05:22.264010 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cgmm9" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="registry-server" containerID="cri-o://2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f" gracePeriod=2 Nov 26 10:05:22 crc kubenswrapper[4940]: I1126 10:05:22.977443 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.133296 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-catalog-content\") pod \"39cc26f8-27b3-4315-aad7-58ef0abefb01\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.133415 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6lnv\" (UniqueName: \"kubernetes.io/projected/39cc26f8-27b3-4315-aad7-58ef0abefb01-kube-api-access-w6lnv\") pod \"39cc26f8-27b3-4315-aad7-58ef0abefb01\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.133549 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-utilities\") pod \"39cc26f8-27b3-4315-aad7-58ef0abefb01\" (UID: \"39cc26f8-27b3-4315-aad7-58ef0abefb01\") " Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.135931 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-utilities" (OuterVolumeSpecName: "utilities") pod "39cc26f8-27b3-4315-aad7-58ef0abefb01" (UID: "39cc26f8-27b3-4315-aad7-58ef0abefb01"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.141770 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39cc26f8-27b3-4315-aad7-58ef0abefb01-kube-api-access-w6lnv" (OuterVolumeSpecName: "kube-api-access-w6lnv") pod "39cc26f8-27b3-4315-aad7-58ef0abefb01" (UID: "39cc26f8-27b3-4315-aad7-58ef0abefb01"). InnerVolumeSpecName "kube-api-access-w6lnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.236436 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6lnv\" (UniqueName: \"kubernetes.io/projected/39cc26f8-27b3-4315-aad7-58ef0abefb01-kube-api-access-w6lnv\") on node \"crc\" DevicePath \"\"" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.236785 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.262389 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39cc26f8-27b3-4315-aad7-58ef0abefb01" (UID: "39cc26f8-27b3-4315-aad7-58ef0abefb01"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.277478 4940 generic.go:334] "Generic (PLEG): container finished" podID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerID="2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f" exitCode=0 Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.277540 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgmm9" event={"ID":"39cc26f8-27b3-4315-aad7-58ef0abefb01","Type":"ContainerDied","Data":"2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f"} Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.277583 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgmm9" event={"ID":"39cc26f8-27b3-4315-aad7-58ef0abefb01","Type":"ContainerDied","Data":"64dd5da575cf0a811dd2d6065f21e369283e684a3914023b49622d29e5816bff"} Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.277612 4940 scope.go:117] "RemoveContainer" containerID="2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.277541 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cgmm9" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.310937 4940 scope.go:117] "RemoveContainer" containerID="949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.325069 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cgmm9"] Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.337085 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cgmm9"] Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.337212 4940 scope.go:117] "RemoveContainer" containerID="32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.339306 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39cc26f8-27b3-4315-aad7-58ef0abefb01-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.388308 4940 scope.go:117] "RemoveContainer" containerID="2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f" Nov 26 10:05:23 crc kubenswrapper[4940]: E1126 10:05:23.388739 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f\": container with ID starting with 2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f not found: ID does not exist" containerID="2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.388779 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f"} err="failed to get container status \"2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f\": rpc error: code = NotFound desc = could not find container \"2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f\": container with ID starting with 2ff11c91825e33151108dd31b97ace3f8c539825cc625b226de00e49d786287f not found: ID does not exist" Nov 26 10:05:23 crc 
kubenswrapper[4940]: I1126 10:05:23.388808 4940 scope.go:117] "RemoveContainer" containerID="949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854" Nov 26 10:05:23 crc kubenswrapper[4940]: E1126 10:05:23.389203 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854\": container with ID starting with 949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854 not found: ID does not exist" containerID="949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.389316 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854"} err="failed to get container status \"949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854\": rpc error: code = NotFound desc = could not find container \"949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854\": container with ID starting with 949e958aa761bb082529f1bad55737983458456910f92cacc04344922217c854 not found: ID does not exist" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.389386 4940 scope.go:117] "RemoveContainer" containerID="32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5" Nov 26 10:05:23 crc kubenswrapper[4940]: E1126 10:05:23.389659 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5\": container with ID starting with 32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5 not found: ID does not exist" containerID="32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5" Nov 26 10:05:23 crc kubenswrapper[4940]: I1126 10:05:23.389678 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5"} err="failed to get container status \"32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5\": rpc error: code = NotFound desc = could not find container \"32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5\": container with ID starting with 32a63623925205a9f0d92fd58939ef176653bbfc57275e2677822ea4758488b5 not found: ID does not exist" Nov 26 10:05:25 crc kubenswrapper[4940]: I1126 10:05:25.179318 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" path="/var/lib/kubelet/pods/39cc26f8-27b3-4315-aad7-58ef0abefb01/volumes" Nov 26 10:05:51 crc kubenswrapper[4940]: I1126 10:05:51.728566 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:05:51 crc kubenswrapper[4940]: I1126 10:05:51.728974 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:06:21 crc kubenswrapper[4940]: I1126 10:06:21.727816 4940 patch_prober.go:28] interesting 
pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:06:21 crc kubenswrapper[4940]: I1126 10:06:21.728290 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:06:21 crc kubenswrapper[4940]: I1126 10:06:21.728336 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 10:06:21 crc kubenswrapper[4940]: I1126 10:06:21.728836 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b417c15488b023593fd6f3c3f99b556cbd22261b9b352b031f84ca58d98aeb04"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 10:06:21 crc kubenswrapper[4940]: I1126 10:06:21.728887 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://b417c15488b023593fd6f3c3f99b556cbd22261b9b352b031f84ca58d98aeb04" gracePeriod=600 Nov 26 10:06:21 crc kubenswrapper[4940]: I1126 10:06:21.938936 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="b417c15488b023593fd6f3c3f99b556cbd22261b9b352b031f84ca58d98aeb04" exitCode=0 Nov 26 10:06:21 crc kubenswrapper[4940]: I1126 10:06:21.938966 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"b417c15488b023593fd6f3c3f99b556cbd22261b9b352b031f84ca58d98aeb04"} Nov 26 10:06:21 crc kubenswrapper[4940]: I1126 10:06:21.939280 4940 scope.go:117] "RemoveContainer" containerID="e97438f74e9782678648f26fd6cd33a19f4092e857d26b9f796c91403fc4ddbc" Nov 26 10:06:22 crc kubenswrapper[4940]: I1126 10:06:22.950195 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c"} Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.694589 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jswvm"] Nov 26 10:06:42 crc kubenswrapper[4940]: E1126 10:06:42.713108 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="registry-server" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.713144 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="registry-server" Nov 26 10:06:42 crc kubenswrapper[4940]: E1126 10:06:42.713175 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" 
containerName="extract-content" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.713182 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="extract-content" Nov 26 10:06:42 crc kubenswrapper[4940]: E1126 10:06:42.713207 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="extract-utilities" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.713346 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="extract-utilities" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.713640 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="39cc26f8-27b3-4315-aad7-58ef0abefb01" containerName="registry-server" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.715619 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jswvm"] Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.715713 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.819016 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-utilities\") pod \"certified-operators-jswvm\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.819376 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-catalog-content\") pod \"certified-operators-jswvm\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.819500 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzpjs\" (UniqueName: \"kubernetes.io/projected/ef26ae31-cda2-44e4-bb80-57ba50216d0d-kube-api-access-qzpjs\") pod \"certified-operators-jswvm\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.921851 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-catalog-content\") pod \"certified-operators-jswvm\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.921984 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzpjs\" (UniqueName: \"kubernetes.io/projected/ef26ae31-cda2-44e4-bb80-57ba50216d0d-kube-api-access-qzpjs\") pod \"certified-operators-jswvm\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.922200 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-utilities\") pod \"certified-operators-jswvm\" (UID: 
\"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.922376 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-catalog-content\") pod \"certified-operators-jswvm\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.922622 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-utilities\") pod \"certified-operators-jswvm\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:42 crc kubenswrapper[4940]: I1126 10:06:42.945774 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzpjs\" (UniqueName: \"kubernetes.io/projected/ef26ae31-cda2-44e4-bb80-57ba50216d0d-kube-api-access-qzpjs\") pod \"certified-operators-jswvm\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:43 crc kubenswrapper[4940]: I1126 10:06:43.043998 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:43 crc kubenswrapper[4940]: W1126 10:06:43.592671 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef26ae31_cda2_44e4_bb80_57ba50216d0d.slice/crio-3eed846f4546a94467c5c42bc3ac43ff500dd982a1fc278103c06979e9246fec WatchSource:0}: Error finding container 3eed846f4546a94467c5c42bc3ac43ff500dd982a1fc278103c06979e9246fec: Status 404 returned error can't find the container with id 3eed846f4546a94467c5c42bc3ac43ff500dd982a1fc278103c06979e9246fec Nov 26 10:06:43 crc kubenswrapper[4940]: I1126 10:06:43.608187 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jswvm"] Nov 26 10:06:44 crc kubenswrapper[4940]: I1126 10:06:44.168282 4940 generic.go:334] "Generic (PLEG): container finished" podID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerID="22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319" exitCode=0 Nov 26 10:06:44 crc kubenswrapper[4940]: I1126 10:06:44.168326 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jswvm" event={"ID":"ef26ae31-cda2-44e4-bb80-57ba50216d0d","Type":"ContainerDied","Data":"22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319"} Nov 26 10:06:44 crc kubenswrapper[4940]: I1126 10:06:44.168706 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jswvm" event={"ID":"ef26ae31-cda2-44e4-bb80-57ba50216d0d","Type":"ContainerStarted","Data":"3eed846f4546a94467c5c42bc3ac43ff500dd982a1fc278103c06979e9246fec"} Nov 26 10:06:46 crc kubenswrapper[4940]: I1126 10:06:46.198984 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jswvm" event={"ID":"ef26ae31-cda2-44e4-bb80-57ba50216d0d","Type":"ContainerStarted","Data":"73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c"} Nov 26 10:06:47 crc kubenswrapper[4940]: I1126 10:06:47.216746 4940 generic.go:334] "Generic (PLEG): container finished" 
podID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerID="73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c" exitCode=0 Nov 26 10:06:47 crc kubenswrapper[4940]: I1126 10:06:47.216792 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jswvm" event={"ID":"ef26ae31-cda2-44e4-bb80-57ba50216d0d","Type":"ContainerDied","Data":"73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c"} Nov 26 10:06:48 crc kubenswrapper[4940]: I1126 10:06:48.231885 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jswvm" event={"ID":"ef26ae31-cda2-44e4-bb80-57ba50216d0d","Type":"ContainerStarted","Data":"cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6"} Nov 26 10:06:48 crc kubenswrapper[4940]: I1126 10:06:48.258124 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jswvm" podStartSLOduration=2.64043622 podStartE2EDuration="6.258102772s" podCreationTimestamp="2025-11-26 10:06:42 +0000 UTC" firstStartedPulling="2025-11-26 10:06:44.169361875 +0000 UTC m=+11505.689503494" lastFinishedPulling="2025-11-26 10:06:47.787028417 +0000 UTC m=+11509.307170046" observedRunningTime="2025-11-26 10:06:48.256437988 +0000 UTC m=+11509.776579617" watchObservedRunningTime="2025-11-26 10:06:48.258102772 +0000 UTC m=+11509.778244411" Nov 26 10:06:53 crc kubenswrapper[4940]: I1126 10:06:53.044723 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:53 crc kubenswrapper[4940]: I1126 10:06:53.045409 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:53 crc kubenswrapper[4940]: I1126 10:06:53.116731 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:53 crc kubenswrapper[4940]: I1126 10:06:53.358289 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:53 crc kubenswrapper[4940]: I1126 10:06:53.419571 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jswvm"] Nov 26 10:06:55 crc kubenswrapper[4940]: I1126 10:06:55.298793 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jswvm" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerName="registry-server" containerID="cri-o://cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6" gracePeriod=2 Nov 26 10:06:55 crc kubenswrapper[4940]: E1126 10:06:55.431654 4940 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef26ae31_cda2_44e4_bb80_57ba50216d0d.slice/crio-conmon-cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef26ae31_cda2_44e4_bb80_57ba50216d0d.slice/crio-cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6.scope\": RecentStats: unable to find data in memory cache]" Nov 26 10:06:55 crc kubenswrapper[4940]: I1126 10:06:55.994602 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.075023 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-utilities\") pod \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.075151 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-catalog-content\") pod \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.075182 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzpjs\" (UniqueName: \"kubernetes.io/projected/ef26ae31-cda2-44e4-bb80-57ba50216d0d-kube-api-access-qzpjs\") pod \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\" (UID: \"ef26ae31-cda2-44e4-bb80-57ba50216d0d\") " Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.077023 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-utilities" (OuterVolumeSpecName: "utilities") pod "ef26ae31-cda2-44e4-bb80-57ba50216d0d" (UID: "ef26ae31-cda2-44e4-bb80-57ba50216d0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.083412 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef26ae31-cda2-44e4-bb80-57ba50216d0d-kube-api-access-qzpjs" (OuterVolumeSpecName: "kube-api-access-qzpjs") pod "ef26ae31-cda2-44e4-bb80-57ba50216d0d" (UID: "ef26ae31-cda2-44e4-bb80-57ba50216d0d"). InnerVolumeSpecName "kube-api-access-qzpjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.176908 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.177182 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzpjs\" (UniqueName: \"kubernetes.io/projected/ef26ae31-cda2-44e4-bb80-57ba50216d0d-kube-api-access-qzpjs\") on node \"crc\" DevicePath \"\"" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.183390 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ef26ae31-cda2-44e4-bb80-57ba50216d0d" (UID: "ef26ae31-cda2-44e4-bb80-57ba50216d0d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.280461 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef26ae31-cda2-44e4-bb80-57ba50216d0d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.314638 4940 generic.go:334] "Generic (PLEG): container finished" podID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerID="cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6" exitCode=0 Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.314684 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jswvm" event={"ID":"ef26ae31-cda2-44e4-bb80-57ba50216d0d","Type":"ContainerDied","Data":"cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6"} Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.314715 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jswvm" event={"ID":"ef26ae31-cda2-44e4-bb80-57ba50216d0d","Type":"ContainerDied","Data":"3eed846f4546a94467c5c42bc3ac43ff500dd982a1fc278103c06979e9246fec"} Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.314739 4940 scope.go:117] "RemoveContainer" containerID="cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.316554 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jswvm" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.363317 4940 scope.go:117] "RemoveContainer" containerID="73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.370951 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jswvm"] Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.393478 4940 scope.go:117] "RemoveContainer" containerID="22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.414012 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jswvm"] Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.449342 4940 scope.go:117] "RemoveContainer" containerID="cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6" Nov 26 10:06:56 crc kubenswrapper[4940]: E1126 10:06:56.449720 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6\": container with ID starting with cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6 not found: ID does not exist" containerID="cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.449761 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6"} err="failed to get container status \"cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6\": rpc error: code = NotFound desc = could not find container \"cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6\": container with ID starting with cd0161a7756ab7019baf503755cc4f6f5d45aec02bb8dae36f6ba841c7485eb6 not found: ID does not exist" Nov 26 
10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.449789 4940 scope.go:117] "RemoveContainer" containerID="73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c" Nov 26 10:06:56 crc kubenswrapper[4940]: E1126 10:06:56.450115 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c\": container with ID starting with 73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c not found: ID does not exist" containerID="73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.450145 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c"} err="failed to get container status \"73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c\": rpc error: code = NotFound desc = could not find container \"73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c\": container with ID starting with 73bc49a7c0833d85fa1966f96094acb2c242698ac30fefdc6d5c830f018b151c not found: ID does not exist" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.450169 4940 scope.go:117] "RemoveContainer" containerID="22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319" Nov 26 10:06:56 crc kubenswrapper[4940]: E1126 10:06:56.450422 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319\": container with ID starting with 22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319 not found: ID does not exist" containerID="22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319" Nov 26 10:06:56 crc kubenswrapper[4940]: I1126 10:06:56.450464 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319"} err="failed to get container status \"22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319\": rpc error: code = NotFound desc = could not find container \"22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319\": container with ID starting with 22fea244858329075e9aa5d8cf27602dc5b41ab3a2f3ad80a3aee48fe636e319 not found: ID does not exist" Nov 26 10:06:57 crc kubenswrapper[4940]: I1126 10:06:57.185410 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" path="/var/lib/kubelet/pods/ef26ae31-cda2-44e4-bb80-57ba50216d0d/volumes" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.650497 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mnlxq"] Nov 26 10:08:48 crc kubenswrapper[4940]: E1126 10:08:48.651800 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerName="extract-content" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.651821 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerName="extract-content" Nov 26 10:08:48 crc kubenswrapper[4940]: E1126 10:08:48.651856 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerName="registry-server" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.651867 
4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerName="registry-server" Nov 26 10:08:48 crc kubenswrapper[4940]: E1126 10:08:48.651897 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerName="extract-utilities" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.651908 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerName="extract-utilities" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.652288 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef26ae31-cda2-44e4-bb80-57ba50216d0d" containerName="registry-server" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.654740 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.661856 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-utilities\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.661924 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx94s\" (UniqueName: \"kubernetes.io/projected/615a0ab7-5b49-48b5-863f-843986b0771a-kube-api-access-fx94s\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.662089 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-catalog-content\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.677902 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mnlxq"] Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.762878 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx94s\" (UniqueName: \"kubernetes.io/projected/615a0ab7-5b49-48b5-863f-843986b0771a-kube-api-access-fx94s\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.762964 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-catalog-content\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.763093 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-utilities\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 
crc kubenswrapper[4940]: I1126 10:08:48.763469 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-utilities\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.770203 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-catalog-content\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.782448 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx94s\" (UniqueName: \"kubernetes.io/projected/615a0ab7-5b49-48b5-863f-843986b0771a-kube-api-access-fx94s\") pod \"community-operators-mnlxq\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:48 crc kubenswrapper[4940]: I1126 10:08:48.989311 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:49 crc kubenswrapper[4940]: I1126 10:08:49.565794 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mnlxq"] Nov 26 10:08:50 crc kubenswrapper[4940]: I1126 10:08:50.053085 4940 generic.go:334] "Generic (PLEG): container finished" podID="615a0ab7-5b49-48b5-863f-843986b0771a" containerID="5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10" exitCode=0 Nov 26 10:08:50 crc kubenswrapper[4940]: I1126 10:08:50.053154 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnlxq" event={"ID":"615a0ab7-5b49-48b5-863f-843986b0771a","Type":"ContainerDied","Data":"5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10"} Nov 26 10:08:50 crc kubenswrapper[4940]: I1126 10:08:50.053224 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnlxq" event={"ID":"615a0ab7-5b49-48b5-863f-843986b0771a","Type":"ContainerStarted","Data":"1fe61df309b413f8ac8113399f3d96676babcc7b0b78099ee9bb7bad7ccd16f8"} Nov 26 10:08:51 crc kubenswrapper[4940]: I1126 10:08:51.062689 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnlxq" event={"ID":"615a0ab7-5b49-48b5-863f-843986b0771a","Type":"ContainerStarted","Data":"8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f"} Nov 26 10:08:51 crc kubenswrapper[4940]: I1126 10:08:51.728399 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:08:51 crc kubenswrapper[4940]: I1126 10:08:51.728450 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:08:53 crc kubenswrapper[4940]: I1126 10:08:53.100442 
4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnlxq" event={"ID":"615a0ab7-5b49-48b5-863f-843986b0771a","Type":"ContainerDied","Data":"8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f"} Nov 26 10:08:53 crc kubenswrapper[4940]: I1126 10:08:53.100382 4940 generic.go:334] "Generic (PLEG): container finished" podID="615a0ab7-5b49-48b5-863f-843986b0771a" containerID="8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f" exitCode=0 Nov 26 10:08:54 crc kubenswrapper[4940]: I1126 10:08:54.118482 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnlxq" event={"ID":"615a0ab7-5b49-48b5-863f-843986b0771a","Type":"ContainerStarted","Data":"e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab"} Nov 26 10:08:54 crc kubenswrapper[4940]: I1126 10:08:54.139877 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mnlxq" podStartSLOduration=2.443981655 podStartE2EDuration="6.139862648s" podCreationTimestamp="2025-11-26 10:08:48 +0000 UTC" firstStartedPulling="2025-11-26 10:08:50.056356489 +0000 UTC m=+11631.576498148" lastFinishedPulling="2025-11-26 10:08:53.752237522 +0000 UTC m=+11635.272379141" observedRunningTime="2025-11-26 10:08:54.137579876 +0000 UTC m=+11635.657721505" watchObservedRunningTime="2025-11-26 10:08:54.139862648 +0000 UTC m=+11635.660004267" Nov 26 10:08:58 crc kubenswrapper[4940]: I1126 10:08:58.990270 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:58 crc kubenswrapper[4940]: I1126 10:08:58.991347 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:59 crc kubenswrapper[4940]: I1126 10:08:59.043808 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:59 crc kubenswrapper[4940]: I1126 10:08:59.226573 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:08:59 crc kubenswrapper[4940]: I1126 10:08:59.287355 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mnlxq"] Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.206675 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mnlxq" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" containerName="registry-server" containerID="cri-o://e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab" gracePeriod=2 Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.693603 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j5lwp"] Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.695866 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.720387 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j5lwp"] Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.877248 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-utilities\") pod \"redhat-marketplace-j5lwp\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.877302 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-catalog-content\") pod \"redhat-marketplace-j5lwp\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.877535 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29pb8\" (UniqueName: \"kubernetes.io/projected/814a853f-543e-4721-ad7c-49bc6334ade4-kube-api-access-29pb8\") pod \"redhat-marketplace-j5lwp\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.933220 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.980150 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-catalog-content\") pod \"redhat-marketplace-j5lwp\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.980537 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29pb8\" (UniqueName: \"kubernetes.io/projected/814a853f-543e-4721-ad7c-49bc6334ade4-kube-api-access-29pb8\") pod \"redhat-marketplace-j5lwp\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.980674 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-catalog-content\") pod \"redhat-marketplace-j5lwp\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.980921 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-utilities\") pod \"redhat-marketplace-j5lwp\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.981202 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-utilities\") pod \"redhat-marketplace-j5lwp\" (UID: 
\"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:01 crc kubenswrapper[4940]: I1126 10:09:01.999649 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29pb8\" (UniqueName: \"kubernetes.io/projected/814a853f-543e-4721-ad7c-49bc6334ade4-kube-api-access-29pb8\") pod \"redhat-marketplace-j5lwp\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.015222 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.082727 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-catalog-content\") pod \"615a0ab7-5b49-48b5-863f-843986b0771a\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.082889 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fx94s\" (UniqueName: \"kubernetes.io/projected/615a0ab7-5b49-48b5-863f-843986b0771a-kube-api-access-fx94s\") pod \"615a0ab7-5b49-48b5-863f-843986b0771a\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.082929 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-utilities\") pod \"615a0ab7-5b49-48b5-863f-843986b0771a\" (UID: \"615a0ab7-5b49-48b5-863f-843986b0771a\") " Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.083916 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-utilities" (OuterVolumeSpecName: "utilities") pod "615a0ab7-5b49-48b5-863f-843986b0771a" (UID: "615a0ab7-5b49-48b5-863f-843986b0771a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.085869 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/615a0ab7-5b49-48b5-863f-843986b0771a-kube-api-access-fx94s" (OuterVolumeSpecName: "kube-api-access-fx94s") pod "615a0ab7-5b49-48b5-863f-843986b0771a" (UID: "615a0ab7-5b49-48b5-863f-843986b0771a"). InnerVolumeSpecName "kube-api-access-fx94s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.129288 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "615a0ab7-5b49-48b5-863f-843986b0771a" (UID: "615a0ab7-5b49-48b5-863f-843986b0771a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.186142 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.186175 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fx94s\" (UniqueName: \"kubernetes.io/projected/615a0ab7-5b49-48b5-863f-843986b0771a-kube-api-access-fx94s\") on node \"crc\" DevicePath \"\"" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.186188 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/615a0ab7-5b49-48b5-863f-843986b0771a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.218514 4940 generic.go:334] "Generic (PLEG): container finished" podID="615a0ab7-5b49-48b5-863f-843986b0771a" containerID="e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab" exitCode=0 Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.218575 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnlxq" event={"ID":"615a0ab7-5b49-48b5-863f-843986b0771a","Type":"ContainerDied","Data":"e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab"} Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.218603 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mnlxq" event={"ID":"615a0ab7-5b49-48b5-863f-843986b0771a","Type":"ContainerDied","Data":"1fe61df309b413f8ac8113399f3d96676babcc7b0b78099ee9bb7bad7ccd16f8"} Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.218641 4940 scope.go:117] "RemoveContainer" containerID="e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.218798 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mnlxq" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.251777 4940 scope.go:117] "RemoveContainer" containerID="8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.258437 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mnlxq"] Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.267165 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mnlxq"] Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.285937 4940 scope.go:117] "RemoveContainer" containerID="5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.329455 4940 scope.go:117] "RemoveContainer" containerID="e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab" Nov 26 10:09:02 crc kubenswrapper[4940]: E1126 10:09:02.330171 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab\": container with ID starting with e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab not found: ID does not exist" containerID="e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.330214 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab"} err="failed to get container status \"e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab\": rpc error: code = NotFound desc = could not find container \"e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab\": container with ID starting with e45e6d086496e10837d89e35748fb75c9c3f5d63db86020ebaf8310d8af824ab not found: ID does not exist" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.330241 4940 scope.go:117] "RemoveContainer" containerID="8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f" Nov 26 10:09:02 crc kubenswrapper[4940]: E1126 10:09:02.330736 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f\": container with ID starting with 8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f not found: ID does not exist" containerID="8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.330756 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f"} err="failed to get container status \"8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f\": rpc error: code = NotFound desc = could not find container \"8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f\": container with ID starting with 8da96288f7e49427e0eb446e63abfe693efa6cfdcc55e09c0ced5ced55721f2f not found: ID does not exist" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.330778 4940 scope.go:117] "RemoveContainer" containerID="5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10" Nov 26 10:09:02 crc kubenswrapper[4940]: E1126 10:09:02.330965 4940 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10\": container with ID starting with 5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10 not found: ID does not exist" containerID="5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.330985 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10"} err="failed to get container status \"5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10\": rpc error: code = NotFound desc = could not find container \"5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10\": container with ID starting with 5fc121868ebdc4e6d051a682df510770cb1bc974240b21772f9b83d15c3cdb10 not found: ID does not exist" Nov 26 10:09:02 crc kubenswrapper[4940]: I1126 10:09:02.462964 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j5lwp"] Nov 26 10:09:03 crc kubenswrapper[4940]: I1126 10:09:03.177704 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" path="/var/lib/kubelet/pods/615a0ab7-5b49-48b5-863f-843986b0771a/volumes" Nov 26 10:09:03 crc kubenswrapper[4940]: I1126 10:09:03.229544 4940 generic.go:334] "Generic (PLEG): container finished" podID="814a853f-543e-4721-ad7c-49bc6334ade4" containerID="2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad" exitCode=0 Nov 26 10:09:03 crc kubenswrapper[4940]: I1126 10:09:03.229587 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5lwp" event={"ID":"814a853f-543e-4721-ad7c-49bc6334ade4","Type":"ContainerDied","Data":"2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad"} Nov 26 10:09:03 crc kubenswrapper[4940]: I1126 10:09:03.229612 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5lwp" event={"ID":"814a853f-543e-4721-ad7c-49bc6334ade4","Type":"ContainerStarted","Data":"95b991297e8f8428a706d9491e6d1c2f833e5ca95882cc0c952f24a03d34112d"} Nov 26 10:09:05 crc kubenswrapper[4940]: I1126 10:09:05.251362 4940 generic.go:334] "Generic (PLEG): container finished" podID="814a853f-543e-4721-ad7c-49bc6334ade4" containerID="92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164" exitCode=0 Nov 26 10:09:05 crc kubenswrapper[4940]: I1126 10:09:05.251447 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5lwp" event={"ID":"814a853f-543e-4721-ad7c-49bc6334ade4","Type":"ContainerDied","Data":"92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164"} Nov 26 10:09:06 crc kubenswrapper[4940]: I1126 10:09:06.277321 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5lwp" event={"ID":"814a853f-543e-4721-ad7c-49bc6334ade4","Type":"ContainerStarted","Data":"d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778"} Nov 26 10:09:06 crc kubenswrapper[4940]: I1126 10:09:06.299198 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j5lwp" podStartSLOduration=2.797794547 podStartE2EDuration="5.299176222s" podCreationTimestamp="2025-11-26 10:09:01 +0000 UTC" firstStartedPulling="2025-11-26 10:09:03.231007733 +0000 UTC m=+11644.751149342" 
lastFinishedPulling="2025-11-26 10:09:05.732389398 +0000 UTC m=+11647.252531017" observedRunningTime="2025-11-26 10:09:06.296386773 +0000 UTC m=+11647.816528392" watchObservedRunningTime="2025-11-26 10:09:06.299176222 +0000 UTC m=+11647.819317841" Nov 26 10:09:12 crc kubenswrapper[4940]: I1126 10:09:12.016394 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:12 crc kubenswrapper[4940]: I1126 10:09:12.018644 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:12 crc kubenswrapper[4940]: I1126 10:09:12.073936 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:12 crc kubenswrapper[4940]: I1126 10:09:12.399488 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:12 crc kubenswrapper[4940]: I1126 10:09:12.456304 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j5lwp"] Nov 26 10:09:14 crc kubenswrapper[4940]: I1126 10:09:14.361502 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j5lwp" podUID="814a853f-543e-4721-ad7c-49bc6334ade4" containerName="registry-server" containerID="cri-o://d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778" gracePeriod=2 Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.060451 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.079114 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-utilities\") pod \"814a853f-543e-4721-ad7c-49bc6334ade4\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.079259 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29pb8\" (UniqueName: \"kubernetes.io/projected/814a853f-543e-4721-ad7c-49bc6334ade4-kube-api-access-29pb8\") pod \"814a853f-543e-4721-ad7c-49bc6334ade4\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.079400 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-catalog-content\") pod \"814a853f-543e-4721-ad7c-49bc6334ade4\" (UID: \"814a853f-543e-4721-ad7c-49bc6334ade4\") " Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.080188 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-utilities" (OuterVolumeSpecName: "utilities") pod "814a853f-543e-4721-ad7c-49bc6334ade4" (UID: "814a853f-543e-4721-ad7c-49bc6334ade4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.080818 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.105236 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "814a853f-543e-4721-ad7c-49bc6334ade4" (UID: "814a853f-543e-4721-ad7c-49bc6334ade4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.128602 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/814a853f-543e-4721-ad7c-49bc6334ade4-kube-api-access-29pb8" (OuterVolumeSpecName: "kube-api-access-29pb8") pod "814a853f-543e-4721-ad7c-49bc6334ade4" (UID: "814a853f-543e-4721-ad7c-49bc6334ade4"). InnerVolumeSpecName "kube-api-access-29pb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.182498 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29pb8\" (UniqueName: \"kubernetes.io/projected/814a853f-543e-4721-ad7c-49bc6334ade4-kube-api-access-29pb8\") on node \"crc\" DevicePath \"\"" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.182534 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/814a853f-543e-4721-ad7c-49bc6334ade4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.377867 4940 generic.go:334] "Generic (PLEG): container finished" podID="814a853f-543e-4721-ad7c-49bc6334ade4" containerID="d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778" exitCode=0 Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.377913 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5lwp" event={"ID":"814a853f-543e-4721-ad7c-49bc6334ade4","Type":"ContainerDied","Data":"d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778"} Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.377962 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j5lwp" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.377985 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j5lwp" event={"ID":"814a853f-543e-4721-ad7c-49bc6334ade4","Type":"ContainerDied","Data":"95b991297e8f8428a706d9491e6d1c2f833e5ca95882cc0c952f24a03d34112d"} Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.378022 4940 scope.go:117] "RemoveContainer" containerID="d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.408621 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j5lwp"] Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.410605 4940 scope.go:117] "RemoveContainer" containerID="92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.422162 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j5lwp"] Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.435195 4940 scope.go:117] "RemoveContainer" containerID="2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.502506 4940 scope.go:117] "RemoveContainer" containerID="d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778" Nov 26 10:09:15 crc kubenswrapper[4940]: E1126 10:09:15.502976 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778\": container with ID starting with d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778 not found: ID does not exist" containerID="d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.503015 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778"} err="failed to get container status \"d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778\": rpc error: code = NotFound desc = could not find container \"d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778\": container with ID starting with d778a8f93000f1efdc477132ba68dcd59b563c395a4e8bc3c9a4e6f790a65778 not found: ID does not exist" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.503052 4940 scope.go:117] "RemoveContainer" containerID="92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164" Nov 26 10:09:15 crc kubenswrapper[4940]: E1126 10:09:15.503455 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164\": container with ID starting with 92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164 not found: ID does not exist" containerID="92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.503504 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164"} err="failed to get container status \"92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164\": rpc error: code = NotFound desc = could not find 
container \"92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164\": container with ID starting with 92401ecd8a05084457e9fc6129931a7ec7dd9a3b0df774301815ffbf016f0164 not found: ID does not exist" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.503537 4940 scope.go:117] "RemoveContainer" containerID="2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad" Nov 26 10:09:15 crc kubenswrapper[4940]: E1126 10:09:15.503869 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad\": container with ID starting with 2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad not found: ID does not exist" containerID="2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad" Nov 26 10:09:15 crc kubenswrapper[4940]: I1126 10:09:15.503894 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad"} err="failed to get container status \"2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad\": rpc error: code = NotFound desc = could not find container \"2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad\": container with ID starting with 2d462de040d838823f1da31294d357ab855b583b9ac4d397d42c1c02633da8ad not found: ID does not exist" Nov 26 10:09:17 crc kubenswrapper[4940]: I1126 10:09:17.187467 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="814a853f-543e-4721-ad7c-49bc6334ade4" path="/var/lib/kubelet/pods/814a853f-543e-4721-ad7c-49bc6334ade4/volumes" Nov 26 10:09:21 crc kubenswrapper[4940]: I1126 10:09:21.728700 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:09:21 crc kubenswrapper[4940]: I1126 10:09:21.729670 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.727887 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.728365 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.728400 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.728948 4940 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.728991 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" gracePeriod=600 Nov 26 10:09:51 crc kubenswrapper[4940]: E1126 10:09:51.857472 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.871190 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" exitCode=0 Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.871238 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c"} Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.871280 4940 scope.go:117] "RemoveContainer" containerID="b417c15488b023593fd6f3c3f99b556cbd22261b9b352b031f84ca58d98aeb04" Nov 26 10:09:51 crc kubenswrapper[4940]: I1126 10:09:51.871955 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:09:51 crc kubenswrapper[4940]: E1126 10:09:51.872253 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:10:03 crc kubenswrapper[4940]: I1126 10:10:03.165743 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:10:03 crc kubenswrapper[4940]: E1126 10:10:03.166836 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:10:14 crc kubenswrapper[4940]: I1126 10:10:14.167194 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:10:14 crc kubenswrapper[4940]: E1126 10:10:14.168613 4940 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:10:26 crc kubenswrapper[4940]: I1126 10:10:26.165853 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:10:26 crc kubenswrapper[4940]: E1126 10:10:26.166748 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:10:41 crc kubenswrapper[4940]: I1126 10:10:41.165991 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:10:41 crc kubenswrapper[4940]: E1126 10:10:41.166958 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:10:52 crc kubenswrapper[4940]: I1126 10:10:52.166796 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:10:52 crc kubenswrapper[4940]: E1126 10:10:52.167603 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:11:05 crc kubenswrapper[4940]: I1126 10:11:05.166395 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:11:05 crc kubenswrapper[4940]: E1126 10:11:05.167188 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:11:19 crc kubenswrapper[4940]: I1126 10:11:19.180224 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:11:19 crc kubenswrapper[4940]: E1126 10:11:19.181341 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:11:34 crc kubenswrapper[4940]: I1126 10:11:34.165966 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:11:34 crc kubenswrapper[4940]: E1126 10:11:34.167405 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:11:46 crc kubenswrapper[4940]: I1126 10:11:46.166182 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:11:46 crc kubenswrapper[4940]: E1126 10:11:46.167091 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:12:01 crc kubenswrapper[4940]: I1126 10:12:01.165559 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:12:01 crc kubenswrapper[4940]: E1126 10:12:01.166390 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:12:14 crc kubenswrapper[4940]: I1126 10:12:14.166147 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:12:14 crc kubenswrapper[4940]: E1126 10:12:14.167084 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:12:25 crc kubenswrapper[4940]: I1126 10:12:25.166894 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:12:25 crc kubenswrapper[4940]: E1126 10:12:25.167878 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:12:37 crc kubenswrapper[4940]: I1126 10:12:37.166398 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:12:37 crc kubenswrapper[4940]: E1126 10:12:37.167191 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:12:50 crc kubenswrapper[4940]: I1126 10:12:50.166189 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:12:50 crc kubenswrapper[4940]: E1126 10:12:50.166934 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:13:01 crc kubenswrapper[4940]: I1126 10:13:01.166964 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:13:01 crc kubenswrapper[4940]: E1126 10:13:01.167726 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:13:03 crc kubenswrapper[4940]: I1126 10:13:03.296202 4940 generic.go:334] "Generic (PLEG): container finished" podID="85a497f3-6d48-4234-92ac-98a55aa14977" containerID="376fd20da22a69bc242ba64e1fdad8e4863d34be7c45b5764cf15f2ba41c2eb3" exitCode=0 Nov 26 10:13:03 crc kubenswrapper[4940]: I1126 10:13:03.296300 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"85a497f3-6d48-4234-92ac-98a55aa14977","Type":"ContainerDied","Data":"376fd20da22a69bc242ba64e1fdad8e4863d34be7c45b5764cf15f2ba41c2eb3"} Nov 26 10:13:04 crc kubenswrapper[4940]: I1126 10:13:04.996465 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185265 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185356 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config-secret\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185397 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-temporary\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185448 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ca-certs\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185496 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgc6v\" (UniqueName: \"kubernetes.io/projected/85a497f3-6d48-4234-92ac-98a55aa14977-kube-api-access-fgc6v\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185555 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185616 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ssh-key\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185709 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-config-data\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.185819 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-workdir\") pod \"85a497f3-6d48-4234-92ac-98a55aa14977\" (UID: \"85a497f3-6d48-4234-92ac-98a55aa14977\") " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.186832 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-temporary" (OuterVolumeSpecName: 
"test-operator-ephemeral-temporary") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.188022 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-config-data" (OuterVolumeSpecName: "config-data") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.192315 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "test-operator-logs") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.197952 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85a497f3-6d48-4234-92ac-98a55aa14977-kube-api-access-fgc6v" (OuterVolumeSpecName: "kube-api-access-fgc6v") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "kube-api-access-fgc6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.206668 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.214624 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.243928 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.249756 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.284233 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "85a497f3-6d48-4234-92ac-98a55aa14977" (UID: "85a497f3-6d48-4234-92ac-98a55aa14977"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.289985 4940 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.290023 4940 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.290046 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.290057 4940 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/85a497f3-6d48-4234-92ac-98a55aa14977-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.290065 4940 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.290075 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgc6v\" (UniqueName: \"kubernetes.io/projected/85a497f3-6d48-4234-92ac-98a55aa14977-kube-api-access-fgc6v\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.290084 4940 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.290094 4940 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85a497f3-6d48-4234-92ac-98a55aa14977-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.290102 4940 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/85a497f3-6d48-4234-92ac-98a55aa14977-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.311031 4940 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.333689 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"85a497f3-6d48-4234-92ac-98a55aa14977","Type":"ContainerDied","Data":"e596beed55ede272610e900dd13de7966a48937703e575c62c60efa962b08a27"} Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 
10:13:05.334034 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e596beed55ede272610e900dd13de7966a48937703e575c62c60efa962b08a27" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.334364 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 26 10:13:05 crc kubenswrapper[4940]: I1126 10:13:05.391829 4940 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.826153 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 26 10:13:10 crc kubenswrapper[4940]: E1126 10:13:10.827016 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="814a853f-543e-4721-ad7c-49bc6334ade4" containerName="registry-server" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827059 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="814a853f-543e-4721-ad7c-49bc6334ade4" containerName="registry-server" Nov 26 10:13:10 crc kubenswrapper[4940]: E1126 10:13:10.827122 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" containerName="extract-content" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827136 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" containerName="extract-content" Nov 26 10:13:10 crc kubenswrapper[4940]: E1126 10:13:10.827162 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" containerName="registry-server" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827172 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" containerName="registry-server" Nov 26 10:13:10 crc kubenswrapper[4940]: E1126 10:13:10.827196 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="814a853f-543e-4721-ad7c-49bc6334ade4" containerName="extract-content" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827207 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="814a853f-543e-4721-ad7c-49bc6334ade4" containerName="extract-content" Nov 26 10:13:10 crc kubenswrapper[4940]: E1126 10:13:10.827220 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" containerName="extract-utilities" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827229 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" containerName="extract-utilities" Nov 26 10:13:10 crc kubenswrapper[4940]: E1126 10:13:10.827251 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85a497f3-6d48-4234-92ac-98a55aa14977" containerName="tempest-tests-tempest-tests-runner" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827260 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="85a497f3-6d48-4234-92ac-98a55aa14977" containerName="tempest-tests-tempest-tests-runner" Nov 26 10:13:10 crc kubenswrapper[4940]: E1126 10:13:10.827273 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="814a853f-543e-4721-ad7c-49bc6334ade4" containerName="extract-utilities" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827282 4940 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="814a853f-543e-4721-ad7c-49bc6334ade4" containerName="extract-utilities" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827552 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="814a853f-543e-4721-ad7c-49bc6334ade4" containerName="registry-server" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827571 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="615a0ab7-5b49-48b5-863f-843986b0771a" containerName="registry-server" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.827599 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="85a497f3-6d48-4234-92ac-98a55aa14977" containerName="tempest-tests-tempest-tests-runner" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.828550 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.831928 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-krr5d" Nov 26 10:13:10 crc kubenswrapper[4940]: I1126 10:13:10.838371 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.035146 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svvqm\" (UniqueName: \"kubernetes.io/projected/a444a31f-737b-4340-821b-f639f2fe76bb-kube-api-access-svvqm\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"a444a31f-737b-4340-821b-f639f2fe76bb\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.035203 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"a444a31f-737b-4340-821b-f639f2fe76bb\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.137110 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svvqm\" (UniqueName: \"kubernetes.io/projected/a444a31f-737b-4340-821b-f639f2fe76bb-kube-api-access-svvqm\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"a444a31f-737b-4340-821b-f639f2fe76bb\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.137174 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"a444a31f-737b-4340-821b-f639f2fe76bb\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.137524 4940 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"a444a31f-737b-4340-821b-f639f2fe76bb\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.169709 
4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"a444a31f-737b-4340-821b-f639f2fe76bb\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.174453 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svvqm\" (UniqueName: \"kubernetes.io/projected/a444a31f-737b-4340-821b-f639f2fe76bb-kube-api-access-svvqm\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"a444a31f-737b-4340-821b-f639f2fe76bb\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.465649 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 10:13:11 crc kubenswrapper[4940]: I1126 10:13:11.995543 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 26 10:13:12 crc kubenswrapper[4940]: I1126 10:13:12.023216 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 10:13:12 crc kubenswrapper[4940]: I1126 10:13:12.424710 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"a444a31f-737b-4340-821b-f639f2fe76bb","Type":"ContainerStarted","Data":"d814b1b96c2ed3687ed0b310452951afafdae1226220186b0344aeb6638b0f25"} Nov 26 10:13:13 crc kubenswrapper[4940]: I1126 10:13:13.444449 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"a444a31f-737b-4340-821b-f639f2fe76bb","Type":"ContainerStarted","Data":"0b3e4ef6bc12e9d326e6c395d2c8b72b1bc4f44e22de358913dae30f9030ca29"} Nov 26 10:13:13 crc kubenswrapper[4940]: I1126 10:13:13.480455 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.476084064 podStartE2EDuration="3.480415606s" podCreationTimestamp="2025-11-26 10:13:10 +0000 UTC" firstStartedPulling="2025-11-26 10:13:12.022611922 +0000 UTC m=+11893.542753581" lastFinishedPulling="2025-11-26 10:13:13.026943494 +0000 UTC m=+11894.547085123" observedRunningTime="2025-11-26 10:13:13.456443683 +0000 UTC m=+11894.976585302" watchObservedRunningTime="2025-11-26 10:13:13.480415606 +0000 UTC m=+11895.000557275" Nov 26 10:13:16 crc kubenswrapper[4940]: I1126 10:13:16.166179 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:13:16 crc kubenswrapper[4940]: E1126 10:13:16.167905 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:13:28 crc kubenswrapper[4940]: I1126 10:13:28.165825 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:13:28 crc 
kubenswrapper[4940]: E1126 10:13:28.167080 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:13:40 crc kubenswrapper[4940]: I1126 10:13:40.165810 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:13:40 crc kubenswrapper[4940]: E1126 10:13:40.166666 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:13:52 crc kubenswrapper[4940]: I1126 10:13:52.166804 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:13:52 crc kubenswrapper[4940]: E1126 10:13:52.167865 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:14:03 crc kubenswrapper[4940]: I1126 10:14:03.165255 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:14:03 crc kubenswrapper[4940]: E1126 10:14:03.166307 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:14:17 crc kubenswrapper[4940]: I1126 10:14:17.166198 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:14:17 crc kubenswrapper[4940]: E1126 10:14:17.167027 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:14:28 crc kubenswrapper[4940]: I1126 10:14:28.166345 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:14:28 crc kubenswrapper[4940]: E1126 10:14:28.169227 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.682640 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sd2bd/must-gather-g6wxz"] Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.685019 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.687458 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-sd2bd"/"kube-root-ca.crt" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.687682 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-sd2bd"/"openshift-service-ca.crt" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.687774 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-sd2bd"/"default-dockercfg-h7dz9" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.694423 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-sd2bd/must-gather-g6wxz"] Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.847689 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bj7k\" (UniqueName: \"kubernetes.io/projected/ec6431e5-59f3-42f0-af2b-0672303ec132-kube-api-access-2bj7k\") pod \"must-gather-g6wxz\" (UID: \"ec6431e5-59f3-42f0-af2b-0672303ec132\") " pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.848067 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ec6431e5-59f3-42f0-af2b-0672303ec132-must-gather-output\") pod \"must-gather-g6wxz\" (UID: \"ec6431e5-59f3-42f0-af2b-0672303ec132\") " pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.949852 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ec6431e5-59f3-42f0-af2b-0672303ec132-must-gather-output\") pod \"must-gather-g6wxz\" (UID: \"ec6431e5-59f3-42f0-af2b-0672303ec132\") " pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.949987 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bj7k\" (UniqueName: \"kubernetes.io/projected/ec6431e5-59f3-42f0-af2b-0672303ec132-kube-api-access-2bj7k\") pod \"must-gather-g6wxz\" (UID: \"ec6431e5-59f3-42f0-af2b-0672303ec132\") " pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.950350 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ec6431e5-59f3-42f0-af2b-0672303ec132-must-gather-output\") pod \"must-gather-g6wxz\" (UID: \"ec6431e5-59f3-42f0-af2b-0672303ec132\") " pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:14:31 crc kubenswrapper[4940]: I1126 10:14:31.967574 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bj7k\" 
(UniqueName: \"kubernetes.io/projected/ec6431e5-59f3-42f0-af2b-0672303ec132-kube-api-access-2bj7k\") pod \"must-gather-g6wxz\" (UID: \"ec6431e5-59f3-42f0-af2b-0672303ec132\") " pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:14:32 crc kubenswrapper[4940]: I1126 10:14:32.004060 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:14:32 crc kubenswrapper[4940]: I1126 10:14:32.507026 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-sd2bd/must-gather-g6wxz"] Nov 26 10:14:32 crc kubenswrapper[4940]: I1126 10:14:32.614920 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" event={"ID":"ec6431e5-59f3-42f0-af2b-0672303ec132","Type":"ContainerStarted","Data":"22603d74186e0462ba07ed5e3980998dc377477e119bfac19086d89a67d3e39a"} Nov 26 10:14:37 crc kubenswrapper[4940]: I1126 10:14:37.686283 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" event={"ID":"ec6431e5-59f3-42f0-af2b-0672303ec132","Type":"ContainerStarted","Data":"6430d60207abc4a4b59d30dec5087661c804e4209c70bad8a4b2206963ca5f45"} Nov 26 10:14:37 crc kubenswrapper[4940]: I1126 10:14:37.686992 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" event={"ID":"ec6431e5-59f3-42f0-af2b-0672303ec132","Type":"ContainerStarted","Data":"db4d3fc42c5c8da822130bf6930eccdc10d25e447d774a7fab6d6cd3d4f5ae72"} Nov 26 10:14:37 crc kubenswrapper[4940]: I1126 10:14:37.710280 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" podStartSLOduration=2.777868565 podStartE2EDuration="6.710255581s" podCreationTimestamp="2025-11-26 10:14:31 +0000 UTC" firstStartedPulling="2025-11-26 10:14:32.514015813 +0000 UTC m=+11974.034157432" lastFinishedPulling="2025-11-26 10:14:36.446402819 +0000 UTC m=+11977.966544448" observedRunningTime="2025-11-26 10:14:37.697935518 +0000 UTC m=+11979.218077137" watchObservedRunningTime="2025-11-26 10:14:37.710255581 +0000 UTC m=+11979.230397210" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.216849 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-zvg52"] Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.218660 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.376799 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frlk6\" (UniqueName: \"kubernetes.io/projected/350681f9-7eb9-461e-ba05-435e04d36593-kube-api-access-frlk6\") pod \"crc-debug-zvg52\" (UID: \"350681f9-7eb9-461e-ba05-435e04d36593\") " pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.376835 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/350681f9-7eb9-461e-ba05-435e04d36593-host\") pod \"crc-debug-zvg52\" (UID: \"350681f9-7eb9-461e-ba05-435e04d36593\") " pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.478651 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frlk6\" (UniqueName: \"kubernetes.io/projected/350681f9-7eb9-461e-ba05-435e04d36593-kube-api-access-frlk6\") pod \"crc-debug-zvg52\" (UID: \"350681f9-7eb9-461e-ba05-435e04d36593\") " pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.478704 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/350681f9-7eb9-461e-ba05-435e04d36593-host\") pod \"crc-debug-zvg52\" (UID: \"350681f9-7eb9-461e-ba05-435e04d36593\") " pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.478840 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/350681f9-7eb9-461e-ba05-435e04d36593-host\") pod \"crc-debug-zvg52\" (UID: \"350681f9-7eb9-461e-ba05-435e04d36593\") " pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.500459 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frlk6\" (UniqueName: \"kubernetes.io/projected/350681f9-7eb9-461e-ba05-435e04d36593-kube-api-access-frlk6\") pod \"crc-debug-zvg52\" (UID: \"350681f9-7eb9-461e-ba05-435e04d36593\") " pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.537144 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:14:42 crc kubenswrapper[4940]: I1126 10:14:42.763320 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/crc-debug-zvg52" event={"ID":"350681f9-7eb9-461e-ba05-435e04d36593","Type":"ContainerStarted","Data":"c9175d19a8a8e0d98f3f806b6399297306eb3551b6d4b8387ef455cc76cf9c02"} Nov 26 10:14:43 crc kubenswrapper[4940]: I1126 10:14:43.165700 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:14:43 crc kubenswrapper[4940]: E1126 10:14:43.166116 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:14:52 crc kubenswrapper[4940]: I1126 10:14:52.882482 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/crc-debug-zvg52" event={"ID":"350681f9-7eb9-461e-ba05-435e04d36593","Type":"ContainerStarted","Data":"97b09cc8ded34b5d54b71a06bff12bd91f7d51533abb2499fc3563bc09773d1e"} Nov 26 10:14:52 crc kubenswrapper[4940]: I1126 10:14:52.913789 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-sd2bd/crc-debug-zvg52" podStartSLOduration=1.404578329 podStartE2EDuration="10.913771235s" podCreationTimestamp="2025-11-26 10:14:42 +0000 UTC" firstStartedPulling="2025-11-26 10:14:42.590932605 +0000 UTC m=+11984.111074234" lastFinishedPulling="2025-11-26 10:14:52.100125521 +0000 UTC m=+11993.620267140" observedRunningTime="2025-11-26 10:14:52.9038674 +0000 UTC m=+11994.424009059" watchObservedRunningTime="2025-11-26 10:14:52.913771235 +0000 UTC m=+11994.433912854" Nov 26 10:14:56 crc kubenswrapper[4940]: I1126 10:14:56.166529 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:14:56 crc kubenswrapper[4940]: I1126 10:14:56.928853 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"7f0ccb6a3c589d0db29d14f0495ad6e66e731ab6a4c62b694b69cf952d85d5c3"} Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.182949 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6"] Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.185309 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.188515 4940 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.188848 4940 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.197659 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6"] Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.293593 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb03776c-fc68-47d2-9360-43a642e9642f-config-volume\") pod \"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.293663 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb03776c-fc68-47d2-9360-43a642e9642f-secret-volume\") pod \"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.293763 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf5sg\" (UniqueName: \"kubernetes.io/projected/cb03776c-fc68-47d2-9360-43a642e9642f-kube-api-access-pf5sg\") pod \"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.395676 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb03776c-fc68-47d2-9360-43a642e9642f-config-volume\") pod \"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.395761 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb03776c-fc68-47d2-9360-43a642e9642f-secret-volume\") pod \"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.395857 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf5sg\" (UniqueName: \"kubernetes.io/projected/cb03776c-fc68-47d2-9360-43a642e9642f-kube-api-access-pf5sg\") pod \"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.397074 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb03776c-fc68-47d2-9360-43a642e9642f-config-volume\") pod 
\"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.404307 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb03776c-fc68-47d2-9360-43a642e9642f-secret-volume\") pod \"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.415376 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf5sg\" (UniqueName: \"kubernetes.io/projected/cb03776c-fc68-47d2-9360-43a642e9642f-kube-api-access-pf5sg\") pod \"collect-profiles-29402535-zljb6\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:00 crc kubenswrapper[4940]: I1126 10:15:00.521253 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:05 crc kubenswrapper[4940]: I1126 10:15:05.799498 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6"] Nov 26 10:15:06 crc kubenswrapper[4940]: I1126 10:15:06.045540 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" event={"ID":"cb03776c-fc68-47d2-9360-43a642e9642f","Type":"ContainerStarted","Data":"da38baf0f988c55f74d16c212ab99f726bc8e062bca7b513bdcdb4316c08da0b"} Nov 26 10:15:07 crc kubenswrapper[4940]: I1126 10:15:07.066583 4940 generic.go:334] "Generic (PLEG): container finished" podID="cb03776c-fc68-47d2-9360-43a642e9642f" containerID="45289c0eb53241c43e0644d9e63fcb80b9b918b5d84f0ab2d855d641a2f149a6" exitCode=0 Nov 26 10:15:07 crc kubenswrapper[4940]: I1126 10:15:07.066768 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" event={"ID":"cb03776c-fc68-47d2-9360-43a642e9642f","Type":"ContainerDied","Data":"45289c0eb53241c43e0644d9e63fcb80b9b918b5d84f0ab2d855d641a2f149a6"} Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.577918 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.666381 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb03776c-fc68-47d2-9360-43a642e9642f-secret-volume\") pod \"cb03776c-fc68-47d2-9360-43a642e9642f\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.666529 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pf5sg\" (UniqueName: \"kubernetes.io/projected/cb03776c-fc68-47d2-9360-43a642e9642f-kube-api-access-pf5sg\") pod \"cb03776c-fc68-47d2-9360-43a642e9642f\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.666581 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb03776c-fc68-47d2-9360-43a642e9642f-config-volume\") pod \"cb03776c-fc68-47d2-9360-43a642e9642f\" (UID: \"cb03776c-fc68-47d2-9360-43a642e9642f\") " Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.667375 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb03776c-fc68-47d2-9360-43a642e9642f-config-volume" (OuterVolumeSpecName: "config-volume") pod "cb03776c-fc68-47d2-9360-43a642e9642f" (UID: "cb03776c-fc68-47d2-9360-43a642e9642f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.672527 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb03776c-fc68-47d2-9360-43a642e9642f-kube-api-access-pf5sg" (OuterVolumeSpecName: "kube-api-access-pf5sg") pod "cb03776c-fc68-47d2-9360-43a642e9642f" (UID: "cb03776c-fc68-47d2-9360-43a642e9642f"). InnerVolumeSpecName "kube-api-access-pf5sg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.674314 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb03776c-fc68-47d2-9360-43a642e9642f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cb03776c-fc68-47d2-9360-43a642e9642f" (UID: "cb03776c-fc68-47d2-9360-43a642e9642f"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.768847 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pf5sg\" (UniqueName: \"kubernetes.io/projected/cb03776c-fc68-47d2-9360-43a642e9642f-kube-api-access-pf5sg\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.768886 4940 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb03776c-fc68-47d2-9360-43a642e9642f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:08 crc kubenswrapper[4940]: I1126 10:15:08.768899 4940 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb03776c-fc68-47d2-9360-43a642e9642f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:09 crc kubenswrapper[4940]: I1126 10:15:09.091872 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" event={"ID":"cb03776c-fc68-47d2-9360-43a642e9642f","Type":"ContainerDied","Data":"da38baf0f988c55f74d16c212ab99f726bc8e062bca7b513bdcdb4316c08da0b"} Nov 26 10:15:09 crc kubenswrapper[4940]: I1126 10:15:09.092250 4940 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da38baf0f988c55f74d16c212ab99f726bc8e062bca7b513bdcdb4316c08da0b" Nov 26 10:15:09 crc kubenswrapper[4940]: I1126 10:15:09.091925 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402535-zljb6" Nov 26 10:15:09 crc kubenswrapper[4940]: I1126 10:15:09.658178 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2"] Nov 26 10:15:09 crc kubenswrapper[4940]: I1126 10:15:09.670010 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402490-fn2p2"] Nov 26 10:15:11 crc kubenswrapper[4940]: I1126 10:15:11.185433 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6341ab6a-df09-4743-a2f4-443ea76ce8eb" path="/var/lib/kubelet/pods/6341ab6a-df09-4743-a2f4-443ea76ce8eb/volumes" Nov 26 10:15:19 crc kubenswrapper[4940]: I1126 10:15:19.030960 4940 scope.go:117] "RemoveContainer" containerID="2e024f13ebf448faa7dbc95e4b8d90dd408d1a75d8f809c8180eb2c0305043db" Nov 26 10:15:41 crc kubenswrapper[4940]: I1126 10:15:41.622894 4940 generic.go:334] "Generic (PLEG): container finished" podID="350681f9-7eb9-461e-ba05-435e04d36593" containerID="97b09cc8ded34b5d54b71a06bff12bd91f7d51533abb2499fc3563bc09773d1e" exitCode=0 Nov 26 10:15:41 crc kubenswrapper[4940]: I1126 10:15:41.622966 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/crc-debug-zvg52" event={"ID":"350681f9-7eb9-461e-ba05-435e04d36593","Type":"ContainerDied","Data":"97b09cc8ded34b5d54b71a06bff12bd91f7d51533abb2499fc3563bc09773d1e"} Nov 26 10:15:42 crc kubenswrapper[4940]: I1126 10:15:42.776649 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:15:42 crc kubenswrapper[4940]: I1126 10:15:42.813164 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-zvg52"] Nov 26 10:15:42 crc kubenswrapper[4940]: I1126 10:15:42.823624 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-zvg52"] Nov 26 10:15:42 crc kubenswrapper[4940]: I1126 10:15:42.925131 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frlk6\" (UniqueName: \"kubernetes.io/projected/350681f9-7eb9-461e-ba05-435e04d36593-kube-api-access-frlk6\") pod \"350681f9-7eb9-461e-ba05-435e04d36593\" (UID: \"350681f9-7eb9-461e-ba05-435e04d36593\") " Nov 26 10:15:42 crc kubenswrapper[4940]: I1126 10:15:42.925564 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/350681f9-7eb9-461e-ba05-435e04d36593-host\") pod \"350681f9-7eb9-461e-ba05-435e04d36593\" (UID: \"350681f9-7eb9-461e-ba05-435e04d36593\") " Nov 26 10:15:42 crc kubenswrapper[4940]: I1126 10:15:42.925753 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/350681f9-7eb9-461e-ba05-435e04d36593-host" (OuterVolumeSpecName: "host") pod "350681f9-7eb9-461e-ba05-435e04d36593" (UID: "350681f9-7eb9-461e-ba05-435e04d36593"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 10:15:42 crc kubenswrapper[4940]: I1126 10:15:42.926707 4940 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/350681f9-7eb9-461e-ba05-435e04d36593-host\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:42 crc kubenswrapper[4940]: I1126 10:15:42.931417 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/350681f9-7eb9-461e-ba05-435e04d36593-kube-api-access-frlk6" (OuterVolumeSpecName: "kube-api-access-frlk6") pod "350681f9-7eb9-461e-ba05-435e04d36593" (UID: "350681f9-7eb9-461e-ba05-435e04d36593"). InnerVolumeSpecName "kube-api-access-frlk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:15:43 crc kubenswrapper[4940]: I1126 10:15:43.028613 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frlk6\" (UniqueName: \"kubernetes.io/projected/350681f9-7eb9-461e-ba05-435e04d36593-kube-api-access-frlk6\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:43 crc kubenswrapper[4940]: I1126 10:15:43.180940 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="350681f9-7eb9-461e-ba05-435e04d36593" path="/var/lib/kubelet/pods/350681f9-7eb9-461e-ba05-435e04d36593/volumes" Nov 26 10:15:43 crc kubenswrapper[4940]: I1126 10:15:43.650002 4940 scope.go:117] "RemoveContainer" containerID="97b09cc8ded34b5d54b71a06bff12bd91f7d51533abb2499fc3563bc09773d1e" Nov 26 10:15:43 crc kubenswrapper[4940]: I1126 10:15:43.650058 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-zvg52" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.105459 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-n2tqq"] Nov 26 10:15:44 crc kubenswrapper[4940]: E1126 10:15:44.105964 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="350681f9-7eb9-461e-ba05-435e04d36593" containerName="container-00" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.105981 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="350681f9-7eb9-461e-ba05-435e04d36593" containerName="container-00" Nov 26 10:15:44 crc kubenswrapper[4940]: E1126 10:15:44.106060 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb03776c-fc68-47d2-9360-43a642e9642f" containerName="collect-profiles" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.106072 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb03776c-fc68-47d2-9360-43a642e9642f" containerName="collect-profiles" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.106328 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="350681f9-7eb9-461e-ba05-435e04d36593" containerName="container-00" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.106356 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb03776c-fc68-47d2-9360-43a642e9642f" containerName="collect-profiles" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.107201 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.255788 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6pfd\" (UniqueName: \"kubernetes.io/projected/6d126710-42d3-4618-a798-7da801d19a55-kube-api-access-x6pfd\") pod \"crc-debug-n2tqq\" (UID: \"6d126710-42d3-4618-a798-7da801d19a55\") " pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.255886 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6d126710-42d3-4618-a798-7da801d19a55-host\") pod \"crc-debug-n2tqq\" (UID: \"6d126710-42d3-4618-a798-7da801d19a55\") " pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.358878 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6pfd\" (UniqueName: \"kubernetes.io/projected/6d126710-42d3-4618-a798-7da801d19a55-kube-api-access-x6pfd\") pod \"crc-debug-n2tqq\" (UID: \"6d126710-42d3-4618-a798-7da801d19a55\") " pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.358998 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6d126710-42d3-4618-a798-7da801d19a55-host\") pod \"crc-debug-n2tqq\" (UID: \"6d126710-42d3-4618-a798-7da801d19a55\") " pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.359170 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6d126710-42d3-4618-a798-7da801d19a55-host\") pod \"crc-debug-n2tqq\" (UID: \"6d126710-42d3-4618-a798-7da801d19a55\") " pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:44 crc 
kubenswrapper[4940]: I1126 10:15:44.394224 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6pfd\" (UniqueName: \"kubernetes.io/projected/6d126710-42d3-4618-a798-7da801d19a55-kube-api-access-x6pfd\") pod \"crc-debug-n2tqq\" (UID: \"6d126710-42d3-4618-a798-7da801d19a55\") " pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.432394 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:44 crc kubenswrapper[4940]: W1126 10:15:44.468595 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d126710_42d3_4618_a798_7da801d19a55.slice/crio-1fa069e574f85fb79102ffb1438123c28beed6d4757a3faad0182bd378e1e30a WatchSource:0}: Error finding container 1fa069e574f85fb79102ffb1438123c28beed6d4757a3faad0182bd378e1e30a: Status 404 returned error can't find the container with id 1fa069e574f85fb79102ffb1438123c28beed6d4757a3faad0182bd378e1e30a Nov 26 10:15:44 crc kubenswrapper[4940]: I1126 10:15:44.665484 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" event={"ID":"6d126710-42d3-4618-a798-7da801d19a55","Type":"ContainerStarted","Data":"1fa069e574f85fb79102ffb1438123c28beed6d4757a3faad0182bd378e1e30a"} Nov 26 10:15:45 crc kubenswrapper[4940]: I1126 10:15:45.688813 4940 generic.go:334] "Generic (PLEG): container finished" podID="6d126710-42d3-4618-a798-7da801d19a55" containerID="45491f462425312974a8d82faf978c0f6cdadf2582133c3ce3bd8e2a38da83b8" exitCode=0 Nov 26 10:15:45 crc kubenswrapper[4940]: I1126 10:15:45.689216 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" event={"ID":"6d126710-42d3-4618-a798-7da801d19a55","Type":"ContainerDied","Data":"45491f462425312974a8d82faf978c0f6cdadf2582133c3ce3bd8e2a38da83b8"} Nov 26 10:15:46 crc kubenswrapper[4940]: I1126 10:15:46.366469 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-n2tqq"] Nov 26 10:15:46 crc kubenswrapper[4940]: I1126 10:15:46.375780 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-n2tqq"] Nov 26 10:15:46 crc kubenswrapper[4940]: I1126 10:15:46.817328 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:46 crc kubenswrapper[4940]: I1126 10:15:46.932230 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6pfd\" (UniqueName: \"kubernetes.io/projected/6d126710-42d3-4618-a798-7da801d19a55-kube-api-access-x6pfd\") pod \"6d126710-42d3-4618-a798-7da801d19a55\" (UID: \"6d126710-42d3-4618-a798-7da801d19a55\") " Nov 26 10:15:46 crc kubenswrapper[4940]: I1126 10:15:46.932367 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6d126710-42d3-4618-a798-7da801d19a55-host\") pod \"6d126710-42d3-4618-a798-7da801d19a55\" (UID: \"6d126710-42d3-4618-a798-7da801d19a55\") " Nov 26 10:15:46 crc kubenswrapper[4940]: I1126 10:15:46.932656 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6d126710-42d3-4618-a798-7da801d19a55-host" (OuterVolumeSpecName: "host") pod "6d126710-42d3-4618-a798-7da801d19a55" (UID: "6d126710-42d3-4618-a798-7da801d19a55"). 
InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 10:15:46 crc kubenswrapper[4940]: I1126 10:15:46.933552 4940 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6d126710-42d3-4618-a798-7da801d19a55-host\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:46 crc kubenswrapper[4940]: I1126 10:15:46.938915 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d126710-42d3-4618-a798-7da801d19a55-kube-api-access-x6pfd" (OuterVolumeSpecName: "kube-api-access-x6pfd") pod "6d126710-42d3-4618-a798-7da801d19a55" (UID: "6d126710-42d3-4618-a798-7da801d19a55"). InnerVolumeSpecName "kube-api-access-x6pfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.035908 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6pfd\" (UniqueName: \"kubernetes.io/projected/6d126710-42d3-4618-a798-7da801d19a55-kube-api-access-x6pfd\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.186196 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d126710-42d3-4618-a798-7da801d19a55" path="/var/lib/kubelet/pods/6d126710-42d3-4618-a798-7da801d19a55/volumes" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.550586 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-btgpm"] Nov 26 10:15:47 crc kubenswrapper[4940]: E1126 10:15:47.551616 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d126710-42d3-4618-a798-7da801d19a55" containerName="container-00" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.551647 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d126710-42d3-4618-a798-7da801d19a55" containerName="container-00" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.552006 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d126710-42d3-4618-a798-7da801d19a55" containerName="container-00" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.553848 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.647611 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f49bd264-5050-4466-8f56-3e2f4b645840-host\") pod \"crc-debug-btgpm\" (UID: \"f49bd264-5050-4466-8f56-3e2f4b645840\") " pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.647669 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ft7k\" (UniqueName: \"kubernetes.io/projected/f49bd264-5050-4466-8f56-3e2f4b645840-kube-api-access-4ft7k\") pod \"crc-debug-btgpm\" (UID: \"f49bd264-5050-4466-8f56-3e2f4b645840\") " pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.727893 4940 scope.go:117] "RemoveContainer" containerID="45491f462425312974a8d82faf978c0f6cdadf2582133c3ce3bd8e2a38da83b8" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.728184 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-n2tqq" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.750175 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f49bd264-5050-4466-8f56-3e2f4b645840-host\") pod \"crc-debug-btgpm\" (UID: \"f49bd264-5050-4466-8f56-3e2f4b645840\") " pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.750290 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ft7k\" (UniqueName: \"kubernetes.io/projected/f49bd264-5050-4466-8f56-3e2f4b645840-kube-api-access-4ft7k\") pod \"crc-debug-btgpm\" (UID: \"f49bd264-5050-4466-8f56-3e2f4b645840\") " pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.750330 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f49bd264-5050-4466-8f56-3e2f4b645840-host\") pod \"crc-debug-btgpm\" (UID: \"f49bd264-5050-4466-8f56-3e2f4b645840\") " pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.783602 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ft7k\" (UniqueName: \"kubernetes.io/projected/f49bd264-5050-4466-8f56-3e2f4b645840-kube-api-access-4ft7k\") pod \"crc-debug-btgpm\" (UID: \"f49bd264-5050-4466-8f56-3e2f4b645840\") " pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:47 crc kubenswrapper[4940]: I1126 10:15:47.874861 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:47 crc kubenswrapper[4940]: W1126 10:15:47.922387 4940 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf49bd264_5050_4466_8f56_3e2f4b645840.slice/crio-74c8f2edda2d6cb5fcd9748756a471aa5dc22e149ab04230a66f1bb4a3c8c399 WatchSource:0}: Error finding container 74c8f2edda2d6cb5fcd9748756a471aa5dc22e149ab04230a66f1bb4a3c8c399: Status 404 returned error can't find the container with id 74c8f2edda2d6cb5fcd9748756a471aa5dc22e149ab04230a66f1bb4a3c8c399 Nov 26 10:15:48 crc kubenswrapper[4940]: I1126 10:15:48.742191 4940 generic.go:334] "Generic (PLEG): container finished" podID="f49bd264-5050-4466-8f56-3e2f4b645840" containerID="e3787ea2beed2362d5f86f0fd96439fef2319eaaebd6bccb6f11d93a5330292d" exitCode=0 Nov 26 10:15:48 crc kubenswrapper[4940]: I1126 10:15:48.742283 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/crc-debug-btgpm" event={"ID":"f49bd264-5050-4466-8f56-3e2f4b645840","Type":"ContainerDied","Data":"e3787ea2beed2362d5f86f0fd96439fef2319eaaebd6bccb6f11d93a5330292d"} Nov 26 10:15:48 crc kubenswrapper[4940]: I1126 10:15:48.742629 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/crc-debug-btgpm" event={"ID":"f49bd264-5050-4466-8f56-3e2f4b645840","Type":"ContainerStarted","Data":"74c8f2edda2d6cb5fcd9748756a471aa5dc22e149ab04230a66f1bb4a3c8c399"} Nov 26 10:15:48 crc kubenswrapper[4940]: I1126 10:15:48.792062 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-btgpm"] Nov 26 10:15:48 crc kubenswrapper[4940]: I1126 10:15:48.806753 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sd2bd/crc-debug-btgpm"] Nov 26 10:15:49 crc 
kubenswrapper[4940]: I1126 10:15:49.884671 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:50 crc kubenswrapper[4940]: I1126 10:15:50.007814 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ft7k\" (UniqueName: \"kubernetes.io/projected/f49bd264-5050-4466-8f56-3e2f4b645840-kube-api-access-4ft7k\") pod \"f49bd264-5050-4466-8f56-3e2f4b645840\" (UID: \"f49bd264-5050-4466-8f56-3e2f4b645840\") " Nov 26 10:15:50 crc kubenswrapper[4940]: I1126 10:15:50.008241 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f49bd264-5050-4466-8f56-3e2f4b645840-host\") pod \"f49bd264-5050-4466-8f56-3e2f4b645840\" (UID: \"f49bd264-5050-4466-8f56-3e2f4b645840\") " Nov 26 10:15:50 crc kubenswrapper[4940]: I1126 10:15:50.008360 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f49bd264-5050-4466-8f56-3e2f4b645840-host" (OuterVolumeSpecName: "host") pod "f49bd264-5050-4466-8f56-3e2f4b645840" (UID: "f49bd264-5050-4466-8f56-3e2f4b645840"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 10:15:50 crc kubenswrapper[4940]: I1126 10:15:50.009355 4940 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f49bd264-5050-4466-8f56-3e2f4b645840-host\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:50 crc kubenswrapper[4940]: I1126 10:15:50.015227 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f49bd264-5050-4466-8f56-3e2f4b645840-kube-api-access-4ft7k" (OuterVolumeSpecName: "kube-api-access-4ft7k") pod "f49bd264-5050-4466-8f56-3e2f4b645840" (UID: "f49bd264-5050-4466-8f56-3e2f4b645840"). InnerVolumeSpecName "kube-api-access-4ft7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:15:50 crc kubenswrapper[4940]: I1126 10:15:50.110834 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ft7k\" (UniqueName: \"kubernetes.io/projected/f49bd264-5050-4466-8f56-3e2f4b645840-kube-api-access-4ft7k\") on node \"crc\" DevicePath \"\"" Nov 26 10:15:50 crc kubenswrapper[4940]: I1126 10:15:50.777541 4940 scope.go:117] "RemoveContainer" containerID="e3787ea2beed2362d5f86f0fd96439fef2319eaaebd6bccb6f11d93a5330292d" Nov 26 10:15:50 crc kubenswrapper[4940]: I1126 10:15:50.777570 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sd2bd/crc-debug-btgpm" Nov 26 10:15:51 crc kubenswrapper[4940]: I1126 10:15:51.183432 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f49bd264-5050-4466-8f56-3e2f4b645840" path="/var/lib/kubelet/pods/f49bd264-5050-4466-8f56-3e2f4b645840/volumes" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.159473 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-crjxk"] Nov 26 10:15:57 crc kubenswrapper[4940]: E1126 10:15:57.160644 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f49bd264-5050-4466-8f56-3e2f4b645840" containerName="container-00" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.160664 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="f49bd264-5050-4466-8f56-3e2f4b645840" containerName="container-00" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.160936 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="f49bd264-5050-4466-8f56-3e2f4b645840" containerName="container-00" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.163077 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.188791 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-crjxk"] Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.281507 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-utilities\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.281660 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtd25\" (UniqueName: \"kubernetes.io/projected/69f95805-2d82-4b28-9d02-b187f12eefe5-kube-api-access-qtd25\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.281702 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-catalog-content\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.383413 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-utilities\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.383556 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtd25\" (UniqueName: \"kubernetes.io/projected/69f95805-2d82-4b28-9d02-b187f12eefe5-kube-api-access-qtd25\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.383591 4940 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-catalog-content\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.384380 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-utilities\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.384475 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-catalog-content\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.404568 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtd25\" (UniqueName: \"kubernetes.io/projected/69f95805-2d82-4b28-9d02-b187f12eefe5-kube-api-access-qtd25\") pod \"redhat-operators-crjxk\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:57 crc kubenswrapper[4940]: I1126 10:15:57.505060 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:15:58 crc kubenswrapper[4940]: I1126 10:15:58.012055 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-crjxk"] Nov 26 10:15:58 crc kubenswrapper[4940]: I1126 10:15:58.881535 4940 generic.go:334] "Generic (PLEG): container finished" podID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerID="698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec" exitCode=0 Nov 26 10:15:58 crc kubenswrapper[4940]: I1126 10:15:58.881595 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crjxk" event={"ID":"69f95805-2d82-4b28-9d02-b187f12eefe5","Type":"ContainerDied","Data":"698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec"} Nov 26 10:15:58 crc kubenswrapper[4940]: I1126 10:15:58.881631 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crjxk" event={"ID":"69f95805-2d82-4b28-9d02-b187f12eefe5","Type":"ContainerStarted","Data":"f79e64ca1527b846683c677471e3e00f959aefbb433498762c28e767f86ec752"} Nov 26 10:16:00 crc kubenswrapper[4940]: I1126 10:16:00.915137 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crjxk" event={"ID":"69f95805-2d82-4b28-9d02-b187f12eefe5","Type":"ContainerStarted","Data":"d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298"} Nov 26 10:16:07 crc kubenswrapper[4940]: I1126 10:16:07.002704 4940 generic.go:334] "Generic (PLEG): container finished" podID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerID="d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298" exitCode=0 Nov 26 10:16:07 crc kubenswrapper[4940]: I1126 10:16:07.002757 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crjxk" 
event={"ID":"69f95805-2d82-4b28-9d02-b187f12eefe5","Type":"ContainerDied","Data":"d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298"} Nov 26 10:16:09 crc kubenswrapper[4940]: I1126 10:16:09.034342 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crjxk" event={"ID":"69f95805-2d82-4b28-9d02-b187f12eefe5","Type":"ContainerStarted","Data":"d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c"} Nov 26 10:16:09 crc kubenswrapper[4940]: I1126 10:16:09.070099 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-crjxk" podStartSLOduration=3.114695264 podStartE2EDuration="12.070074094s" podCreationTimestamp="2025-11-26 10:15:57 +0000 UTC" firstStartedPulling="2025-11-26 10:15:58.885160934 +0000 UTC m=+12060.405302593" lastFinishedPulling="2025-11-26 10:16:07.840539764 +0000 UTC m=+12069.360681423" observedRunningTime="2025-11-26 10:16:09.063247447 +0000 UTC m=+12070.583389126" watchObservedRunningTime="2025-11-26 10:16:09.070074094 +0000 UTC m=+12070.590215723" Nov 26 10:16:17 crc kubenswrapper[4940]: I1126 10:16:17.506064 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:16:17 crc kubenswrapper[4940]: I1126 10:16:17.506596 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:16:17 crc kubenswrapper[4940]: I1126 10:16:17.585560 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:16:18 crc kubenswrapper[4940]: I1126 10:16:18.213612 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:16:18 crc kubenswrapper[4940]: I1126 10:16:18.280210 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-crjxk"] Nov 26 10:16:20 crc kubenswrapper[4940]: I1126 10:16:20.168400 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-crjxk" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerName="registry-server" containerID="cri-o://d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c" gracePeriod=2 Nov 26 10:16:20 crc kubenswrapper[4940]: I1126 10:16:20.728526 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:16:20 crc kubenswrapper[4940]: I1126 10:16:20.918919 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-utilities\") pod \"69f95805-2d82-4b28-9d02-b187f12eefe5\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " Nov 26 10:16:20 crc kubenswrapper[4940]: I1126 10:16:20.919165 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtd25\" (UniqueName: \"kubernetes.io/projected/69f95805-2d82-4b28-9d02-b187f12eefe5-kube-api-access-qtd25\") pod \"69f95805-2d82-4b28-9d02-b187f12eefe5\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " Nov 26 10:16:20 crc kubenswrapper[4940]: I1126 10:16:20.919245 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-catalog-content\") pod \"69f95805-2d82-4b28-9d02-b187f12eefe5\" (UID: \"69f95805-2d82-4b28-9d02-b187f12eefe5\") " Nov 26 10:16:20 crc kubenswrapper[4940]: I1126 10:16:20.920648 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-utilities" (OuterVolumeSpecName: "utilities") pod "69f95805-2d82-4b28-9d02-b187f12eefe5" (UID: "69f95805-2d82-4b28-9d02-b187f12eefe5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:16:20 crc kubenswrapper[4940]: I1126 10:16:20.926496 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69f95805-2d82-4b28-9d02-b187f12eefe5-kube-api-access-qtd25" (OuterVolumeSpecName: "kube-api-access-qtd25") pod "69f95805-2d82-4b28-9d02-b187f12eefe5" (UID: "69f95805-2d82-4b28-9d02-b187f12eefe5"). InnerVolumeSpecName "kube-api-access-qtd25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.022752 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.022793 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtd25\" (UniqueName: \"kubernetes.io/projected/69f95805-2d82-4b28-9d02-b187f12eefe5-kube-api-access-qtd25\") on node \"crc\" DevicePath \"\"" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.039602 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "69f95805-2d82-4b28-9d02-b187f12eefe5" (UID: "69f95805-2d82-4b28-9d02-b187f12eefe5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.125501 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69f95805-2d82-4b28-9d02-b187f12eefe5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.192975 4940 generic.go:334] "Generic (PLEG): container finished" podID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerID="d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c" exitCode=0 Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.193118 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-crjxk" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.194194 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crjxk" event={"ID":"69f95805-2d82-4b28-9d02-b187f12eefe5","Type":"ContainerDied","Data":"d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c"} Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.194263 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crjxk" event={"ID":"69f95805-2d82-4b28-9d02-b187f12eefe5","Type":"ContainerDied","Data":"f79e64ca1527b846683c677471e3e00f959aefbb433498762c28e767f86ec752"} Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.200138 4940 scope.go:117] "RemoveContainer" containerID="d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.239910 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-crjxk"] Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.248615 4940 scope.go:117] "RemoveContainer" containerID="d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.252778 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-crjxk"] Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.279884 4940 scope.go:117] "RemoveContainer" containerID="698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.350685 4940 scope.go:117] "RemoveContainer" containerID="d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c" Nov 26 10:16:21 crc kubenswrapper[4940]: E1126 10:16:21.351319 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c\": container with ID starting with d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c not found: ID does not exist" containerID="d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.351365 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c"} err="failed to get container status \"d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c\": rpc error: code = NotFound desc = could not find container \"d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c\": container with ID starting with d4d17a3855e73e691f6961b33c8aa29771f74333019a782c6d0c0b080ea7c16c not found: ID does not exist" Nov 26 10:16:21 crc 
kubenswrapper[4940]: I1126 10:16:21.351395 4940 scope.go:117] "RemoveContainer" containerID="d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298" Nov 26 10:16:21 crc kubenswrapper[4940]: E1126 10:16:21.351945 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298\": container with ID starting with d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298 not found: ID does not exist" containerID="d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.351984 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298"} err="failed to get container status \"d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298\": rpc error: code = NotFound desc = could not find container \"d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298\": container with ID starting with d2cfc40f464d8b36fcbd4120220f4c087916b83f6addf26a478298b0f5ce6298 not found: ID does not exist" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.352014 4940 scope.go:117] "RemoveContainer" containerID="698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec" Nov 26 10:16:21 crc kubenswrapper[4940]: E1126 10:16:21.352364 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec\": container with ID starting with 698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec not found: ID does not exist" containerID="698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec" Nov 26 10:16:21 crc kubenswrapper[4940]: I1126 10:16:21.352401 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec"} err="failed to get container status \"698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec\": rpc error: code = NotFound desc = could not find container \"698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec\": container with ID starting with 698459a6eaa9fc6a2b4731e3dab0b2f3fa37c94d7bdf9b60eac3ae3c2ef1caec not found: ID does not exist" Nov 26 10:16:23 crc kubenswrapper[4940]: I1126 10:16:23.186771 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" path="/var/lib/kubelet/pods/69f95805-2d82-4b28-9d02-b187f12eefe5/volumes" Nov 26 10:17:21 crc kubenswrapper[4940]: I1126 10:17:21.728545 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:17:21 crc kubenswrapper[4940]: I1126 10:17:21.729059 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:17:51 crc kubenswrapper[4940]: I1126 10:17:51.728959 4940 patch_prober.go:28] interesting 
pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:17:51 crc kubenswrapper[4940]: I1126 10:17:51.729585 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:18:21 crc kubenswrapper[4940]: I1126 10:18:21.729172 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:18:21 crc kubenswrapper[4940]: I1126 10:18:21.729977 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:18:21 crc kubenswrapper[4940]: I1126 10:18:21.730092 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 10:18:21 crc kubenswrapper[4940]: I1126 10:18:21.731089 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f0ccb6a3c589d0db29d14f0495ad6e66e731ab6a4c62b694b69cf952d85d5c3"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 10:18:21 crc kubenswrapper[4940]: I1126 10:18:21.731205 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://7f0ccb6a3c589d0db29d14f0495ad6e66e731ab6a4c62b694b69cf952d85d5c3" gracePeriod=600 Nov 26 10:18:22 crc kubenswrapper[4940]: I1126 10:18:22.707907 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="7f0ccb6a3c589d0db29d14f0495ad6e66e731ab6a4c62b694b69cf952d85d5c3" exitCode=0 Nov 26 10:18:22 crc kubenswrapper[4940]: I1126 10:18:22.711490 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"7f0ccb6a3c589d0db29d14f0495ad6e66e731ab6a4c62b694b69cf952d85d5c3"} Nov 26 10:18:22 crc kubenswrapper[4940]: I1126 10:18:22.711546 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92"} Nov 26 10:18:22 crc kubenswrapper[4940]: I1126 10:18:22.711569 4940 scope.go:117] "RemoveContainer" containerID="c179ffdb84b33f44278d158923b8eaa656b3e63b3de8cf1e0c0105c5920ef24c" Nov 26 10:19:08 
crc kubenswrapper[4940]: I1126 10:19:08.600326 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_80d71139-436a-4bad-b084-12b7d2e037c9/init-config-reloader/0.log" Nov 26 10:19:08 crc kubenswrapper[4940]: I1126 10:19:08.834857 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_80d71139-436a-4bad-b084-12b7d2e037c9/alertmanager/0.log" Nov 26 10:19:08 crc kubenswrapper[4940]: I1126 10:19:08.858171 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_80d71139-436a-4bad-b084-12b7d2e037c9/init-config-reloader/0.log" Nov 26 10:19:08 crc kubenswrapper[4940]: I1126 10:19:08.882843 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_80d71139-436a-4bad-b084-12b7d2e037c9/config-reloader/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.047577 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_717eab22-62dc-453f-b481-c30aac72c2ca/aodh-api/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.060015 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_717eab22-62dc-453f-b481-c30aac72c2ca/aodh-evaluator/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.083548 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_717eab22-62dc-453f-b481-c30aac72c2ca/aodh-listener/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.174677 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_717eab22-62dc-453f-b481-c30aac72c2ca/aodh-notifier/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.260907 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-649595bd6-sghk8_8c5bf175-86a7-4ea0-854e-2f2751dcd74f/barbican-api-log/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.300084 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-649595bd6-sghk8_8c5bf175-86a7-4ea0-854e-2f2751dcd74f/barbican-api/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.444285 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-f4477dc54-7qvj8_4066a907-0a22-41a0-bcda-727d4b7cad23/barbican-keystone-listener/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.633846 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-dddb6ff69-6kv98_55c1f084-6852-42a7-bbe2-9d9f1ec146dd/barbican-worker/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.692824 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-dddb6ff69-6kv98_55c1f084-6852-42a7-bbe2-9d9f1ec146dd/barbican-worker-log/0.log" Nov 26 10:19:09 crc kubenswrapper[4940]: I1126 10:19:09.911524 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-jrhf2_ded13a4c-1e1b-4ddc-a6a4-33f15ec88fa4/bootstrap-openstack-openstack-cell1/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.054904 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-networker-rn57g_8d78b5da-8b3a-43fa-a3c0-dbdce05784a7/bootstrap-openstack-openstack-networker/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.176352 4940 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-keystone-listener-f4477dc54-7qvj8_4066a907-0a22-41a0-bcda-727d4b7cad23/barbican-keystone-listener-log/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.213703 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8421aed1-48bb-4b33-9e07-b887dfda721a/ceilometer-central-agent/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.261965 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8421aed1-48bb-4b33-9e07-b887dfda721a/ceilometer-notification-agent/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.355594 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8421aed1-48bb-4b33-9e07-b887dfda721a/sg-core/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.356743 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8421aed1-48bb-4b33-9e07-b887dfda721a/proxy-httpd/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.449516 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-openstack-openstack-cell1-zrsks_07af1a08-7b4e-4c4b-b2d5-6f716d5ae725/ceph-client-openstack-openstack-cell1/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.732467 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_80118a04-9a91-4d85-817a-3cd24b169e18/cinder-api/0.log" Nov 26 10:19:10 crc kubenswrapper[4940]: I1126 10:19:10.873158 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_80118a04-9a91-4d85-817a-3cd24b169e18/cinder-api-log/0.log" Nov 26 10:19:11 crc kubenswrapper[4940]: I1126 10:19:11.115011 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_b4b23dcd-1a65-4784-a247-2475cc261618/probe/0.log" Nov 26 10:19:11 crc kubenswrapper[4940]: I1126 10:19:11.197831 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_64575794-fe95-4733-bb74-66dcc92daec4/cinder-scheduler/0.log" Nov 26 10:19:11 crc kubenswrapper[4940]: I1126 10:19:11.461156 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_64575794-fe95-4733-bb74-66dcc92daec4/probe/0.log" Nov 26 10:19:11 crc kubenswrapper[4940]: I1126 10:19:11.556514 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_b4b23dcd-1a65-4784-a247-2475cc261618/cinder-backup/0.log" Nov 26 10:19:11 crc kubenswrapper[4940]: I1126 10:19:11.765671 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_8942266b-29df-4f10-a2fb-b9c1a6921107/probe/0.log" Nov 26 10:19:11 crc kubenswrapper[4940]: I1126 10:19:11.794467 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-xv47l_b6222fff-0241-4d22-b985-de311d9dcd17/configure-network-openstack-openstack-cell1/0.log" Nov 26 10:19:12 crc kubenswrapper[4940]: I1126 10:19:12.327952 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-networker-6jkk6_1ad45eff-0dd2-4c43-b72a-7d852ae00822/configure-network-openstack-openstack-networker/0.log" Nov 26 10:19:12 crc kubenswrapper[4940]: I1126 10:19:12.471374 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-ztjn5_02107190-e269-4d46-a669-6b73512247fa/configure-os-openstack-openstack-cell1/0.log" Nov 26 
10:19:12 crc kubenswrapper[4940]: I1126 10:19:12.604072 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-networker-vg4mh_c49828fa-6d5e-4808-a2b7-10cc1fecaa64/configure-os-openstack-openstack-networker/0.log" Nov 26 10:19:12 crc kubenswrapper[4940]: I1126 10:19:12.621645 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7c864b8c85-8zffx_e40e18b4-b34c-474e-a2f4-01e35988aa45/init/0.log" Nov 26 10:19:12 crc kubenswrapper[4940]: I1126 10:19:12.798996 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7c864b8c85-8zffx_e40e18b4-b34c-474e-a2f4-01e35988aa45/init/0.log" Nov 26 10:19:12 crc kubenswrapper[4940]: I1126 10:19:12.848344 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-6krwc_95d0f4e1-aa9b-443e-b5e5-2668c3aa2214/download-cache-openstack-openstack-cell1/0.log" Nov 26 10:19:12 crc kubenswrapper[4940]: I1126 10:19:12.969218 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7c864b8c85-8zffx_e40e18b4-b34c-474e-a2f4-01e35988aa45/dnsmasq-dns/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.053199 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-networker-8kqfq_94b1f446-bfa7-4c5c-9e23-643d37c77a39/download-cache-openstack-openstack-networker/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.187217 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_8942266b-29df-4f10-a2fb-b9c1a6921107/cinder-volume/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.218361 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5e9d6f60-4098-4e31-9153-d48155c79752/glance-httpd/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.245446 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5e9d6f60-4098-4e31-9153-d48155c79752/glance-log/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.398589 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_3b35d195-bf22-45af-b0d6-0f21bf3d5a67/glance-log/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.399186 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_3b35d195-bf22-45af-b0d6-0f21bf3d5a67/glance-httpd/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.568571 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-6ddd77f754-z9z4k_ed206b9d-81f1-49bf-9dc1-d17d76ec052a/heat-api/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.641904 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-754b7cd586-zs8p6_7e312342-1d38-478d-9ddd-dda028582760/heat-engine/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.747522 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-56cc65db64-2vj5l_edb8e38b-3795-4451-9d4d-60f8ccd5bffc/heat-cfnapi/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.839839 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6575b86469-nmbzf_a2a5d0ad-50c9-42da-9978-51fe890fd3c4/horizon/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.908470 4940 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_horizon-6575b86469-nmbzf_a2a5d0ad-50c9-42da-9978-51fe890fd3c4/horizon-log/0.log" Nov 26 10:19:13 crc kubenswrapper[4940]: I1126 10:19:13.966961 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-cell1-cr4hb_0bdf34ec-75e0-4692-ace7-b208f4ddeaec/install-certs-openstack-openstack-cell1/0.log" Nov 26 10:19:14 crc kubenswrapper[4940]: I1126 10:19:14.016298 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-networker-sz9vh_2e0c436e-dfcf-4eb0-92b6-1e210a026d8c/install-certs-openstack-openstack-networker/0.log" Nov 26 10:19:14 crc kubenswrapper[4940]: I1126 10:19:14.164050 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-2sgj8_146c140b-7adb-4641-b621-e8bd5f3bcb3c/install-os-openstack-openstack-cell1/0.log" Nov 26 10:19:14 crc kubenswrapper[4940]: I1126 10:19:14.200560 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-networker-r7r5p_84b4be12-9142-400e-b21b-2cdd5263b101/install-os-openstack-openstack-networker/0.log" Nov 26 10:19:14 crc kubenswrapper[4940]: I1126 10:19:14.388888 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29402461-k9qcd_df21e3c2-92cf-4d49-b51b-84a895e3e78f/keystone-cron/0.log" Nov 26 10:19:14 crc kubenswrapper[4940]: I1126 10:19:14.563138 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29402521-d622q_34591905-4a5d-490e-9fb2-2cf409cd3aa0/keystone-cron/0.log" Nov 26 10:19:14 crc kubenswrapper[4940]: I1126 10:19:14.643502 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_5171ec99-793d-4fc4-80cd-8e90577c4618/kube-state-metrics/0.log" Nov 26 10:19:14 crc kubenswrapper[4940]: I1126 10:19:14.809963 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-openstack-openstack-cell1-6wckh_a96978df-fadc-461a-91ec-fe51f593b61a/libvirt-openstack-openstack-cell1/0.log" Nov 26 10:19:15 crc kubenswrapper[4940]: I1126 10:19:15.225650 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_f4ad7574-4285-4af3-9ee6-ab07aa65f83f/probe/0.log" Nov 26 10:19:15 crc kubenswrapper[4940]: I1126 10:19:15.242520 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_f4ad7574-4285-4af3-9ee6-ab07aa65f83f/manila-scheduler/0.log" Nov 26 10:19:15 crc kubenswrapper[4940]: I1126 10:19:15.300268 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_0556829d-ebd6-4c87-8d39-dfecb84851d1/manila-api/0.log" Nov 26 10:19:15 crc kubenswrapper[4940]: I1126 10:19:15.302931 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-547b68454-mq24v_41100356-33a2-4f08-be53-7df972e1063f/keystone-api/0.log" Nov 26 10:19:15 crc kubenswrapper[4940]: I1126 10:19:15.417835 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_0556829d-ebd6-4c87-8d39-dfecb84851d1/manila-api-log/0.log" Nov 26 10:19:15 crc kubenswrapper[4940]: I1126 10:19:15.462082 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_c395967c-afac-4a37-9c88-715f6297c9ee/manila-share/0.log" Nov 26 10:19:15 crc kubenswrapper[4940]: I1126 10:19:15.517945 4940 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_manila-share-share1-0_c395967c-afac-4a37-9c88-715f6297c9ee/probe/0.log" Nov 26 10:19:16 crc kubenswrapper[4940]: I1126 10:19:16.010526 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dhcp-openstack-openstack-cell1-s5g27_f0ddfc1d-2654-4502-8476-737a6675dc35/neutron-dhcp-openstack-openstack-cell1/0.log" Nov 26 10:19:16 crc kubenswrapper[4940]: I1126 10:19:16.056407 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dd68f5c47-c8tlf_e9a5605c-a7bf-4d94-aab2-053385ccd488/neutron-httpd/0.log" Nov 26 10:19:16 crc kubenswrapper[4940]: I1126 10:19:16.248505 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-cell1-cn5r4_c7eabd60-aa8a-4fd1-bd75-bbfbee7b705a/neutron-metadata-openstack-openstack-cell1/0.log" Nov 26 10:19:16 crc kubenswrapper[4940]: I1126 10:19:16.444527 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-networker-lc79g_420f7d01-ed55-46a9-970a-fbd4beff5c75/neutron-metadata-openstack-openstack-networker/0.log" Nov 26 10:19:16 crc kubenswrapper[4940]: I1126 10:19:16.504906 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dd68f5c47-c8tlf_e9a5605c-a7bf-4d94-aab2-053385ccd488/neutron-api/0.log" Nov 26 10:19:16 crc kubenswrapper[4940]: I1126 10:19:16.581688 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-sriov-openstack-openstack-cell1-sx8hp_af672acf-5122-4828-9aa1-ec7921df77ec/neutron-sriov-openstack-openstack-cell1/0.log" Nov 26 10:19:16 crc kubenswrapper[4940]: I1126 10:19:16.856977 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_f01319e0-009a-4b1e-b8d5-d76d5d180ab4/nova-api-api/0.log" Nov 26 10:19:17 crc kubenswrapper[4940]: I1126 10:19:17.031938 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_6ab21b1c-967a-485b-aa85-b4027f59d859/nova-cell0-conductor-conductor/0.log" Nov 26 10:19:17 crc kubenswrapper[4940]: I1126 10:19:17.119804 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_9629092f-7cc7-4e81-94b2-8a021a314962/nova-cell1-conductor-conductor/0.log" Nov 26 10:19:17 crc kubenswrapper[4940]: I1126 10:19:17.136518 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_f01319e0-009a-4b1e-b8d5-d76d5d180ab4/nova-api-log/0.log" Nov 26 10:19:17 crc kubenswrapper[4940]: I1126 10:19:17.358904 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_f1710b60-49d1-4953-8bbc-c79734c49f71/nova-cell1-novncproxy-novncproxy/0.log" Nov 26 10:19:17 crc kubenswrapper[4940]: I1126 10:19:17.485143 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-nova-compute-ffu-cell1-openstack-celljzv6l_4fd0ed71-a15f-4e19-a43f-2822fc14199a/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1/0.log" Nov 26 10:19:17 crc kubenswrapper[4940]: I1126 10:19:17.666993 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-openstack-cell1-8jmsb_ddec3681-bc80-4513-bb8a-fd193de5f12f/nova-cell1-openstack-openstack-cell1/0.log" Nov 26 10:19:17 crc kubenswrapper[4940]: I1126 10:19:17.798361 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_785e01eb-e9db-4e60-8cd5-cfeed89c4865/nova-metadata-log/0.log" Nov 26 10:19:17 crc 
kubenswrapper[4940]: I1126 10:19:17.875207 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_785e01eb-e9db-4e60-8cd5-cfeed89c4865/nova-metadata-metadata/0.log" Nov 26 10:19:17 crc kubenswrapper[4940]: I1126 10:19:17.998494 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_d97fb3be-c270-4527-a526-686ad8d6fec6/nova-scheduler-scheduler/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.105262 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_262e1270-2fbd-472b-bbd0-680f16ee060f/mysql-bootstrap/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.246310 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_262e1270-2fbd-472b-bbd0-680f16ee060f/mysql-bootstrap/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.265750 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_262e1270-2fbd-472b-bbd0-680f16ee060f/galera/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.302440 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c03b9b4d-9923-4534-94d4-00e6eee88f27/mysql-bootstrap/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.512400 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c03b9b4d-9923-4534-94d4-00e6eee88f27/galera/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.523286 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_70e630f5-14ad-4165-b133-30973d9125d8/openstackclient/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.560232 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c03b9b4d-9923-4534-94d4-00e6eee88f27/mysql-bootstrap/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.723841 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1/openstack-network-exporter/0.log" Nov 26 10:19:18 crc kubenswrapper[4940]: I1126 10:19:18.746504 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1fc4c1a4-2eb4-4aaf-a767-2894a8f8f0d1/ovn-northd/0.log" Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.031602 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-cell1-22fh4_9675a4b8-6cd8-4b62-a28d-73542c5d1b2a/ovn-openstack-openstack-cell1/0.log" Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.208900 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_ca2e1407-cdff-467f-976b-25cd954ca90c/openstack-network-exporter/0.log" Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.216358 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-networker-khrbz_c4653b1a-ca1d-4cdd-8279-fb05af4ee21a/ovn-openstack-openstack-networker/0.log" Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.283965 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_ca2e1407-cdff-467f-976b-25cd954ca90c/ovsdbserver-nb/0.log" Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.478627 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_d9b4a37c-681f-4b37-81d0-9444ce90ed8a/ovsdbserver-nb/0.log" Nov 26 10:19:19 crc kubenswrapper[4940]: 
I1126 10:19:19.507559 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_d9b4a37c-681f-4b37-81d0-9444ce90ed8a/openstack-network-exporter/0.log"
Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.566130 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_d2b8bb2c-4e49-437e-a546-c844992436f5/openstack-network-exporter/0.log"
Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.646376 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_d2b8bb2c-4e49-437e-a546-c844992436f5/ovsdbserver-nb/0.log"
Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.736684 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_caeadb14-74d5-4e7a-aeb8-4026fe90f57b/openstack-network-exporter/0.log"
Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.803161 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_caeadb14-74d5-4e7a-aeb8-4026fe90f57b/ovsdbserver-sb/0.log"
Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.929452 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_37cd323c-b236-4863-ad21-d6aaf9c48065/openstack-network-exporter/0.log"
Nov 26 10:19:19 crc kubenswrapper[4940]: I1126 10:19:19.958795 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_37cd323c-b236-4863-ad21-d6aaf9c48065/ovsdbserver-sb/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.113396 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_1210d1df-4b74-4f1b-83f3-e391e318adb4/ovsdbserver-sb/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.123328 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_1210d1df-4b74-4f1b-83f3-e391e318adb4/openstack-network-exporter/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.436369 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-c2xhbq_801e3632-a0b9-46a0-bff8-0ce14f7f5304/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.465760 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6b875cd8bd-csl9h_d242927a-47af-43db-9ecb-e25ba58cb291/placement-api/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.532490 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6b875cd8bd-csl9h_d242927a-47af-43db-9ecb-e25ba58cb291/placement-log/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.650231 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-n2qh9x_76ea5069-a91e-4ffe-b5c4-41ddd23fd721/pre-adoption-validation-openstack-pre-adoption-openstack-networ/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.716402 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_3b3736b9-b106-4b89-a513-d5e5440ce386/init-config-reloader/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.893421 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_3b3736b9-b106-4b89-a513-d5e5440ce386/prometheus/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.909526 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_3b3736b9-b106-4b89-a513-d5e5440ce386/thanos-sidecar/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.934358 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_3b3736b9-b106-4b89-a513-d5e5440ce386/init-config-reloader/0.log"
Nov 26 10:19:20 crc kubenswrapper[4940]: I1126 10:19:20.936666 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_3b3736b9-b106-4b89-a513-d5e5440ce386/config-reloader/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.147759 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2a38e5e7-6d1d-4906-a469-2103514fc67b/setup-container/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.323604 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2a38e5e7-6d1d-4906-a469-2103514fc67b/setup-container/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.363798 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0981b326-3444-45d7-b19d-5a33f431bf84/setup-container/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.367478 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2a38e5e7-6d1d-4906-a469-2103514fc67b/rabbitmq/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.614867 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-cell1-v4djb_38de9225-517b-45c9-8f18-efad998ca841/reboot-os-openstack-openstack-cell1/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.648435 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0981b326-3444-45d7-b19d-5a33f431bf84/setup-container/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.769734 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_0981b326-3444-45d7-b19d-5a33f431bf84/rabbitmq/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.817893 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-networker-8ncnh_6d46bd9e-4b00-47c2-87ab-c82c54b3045b/reboot-os-openstack-openstack-networker/0.log"
Nov 26 10:19:21 crc kubenswrapper[4940]: I1126 10:19:21.954097 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-cell1-tkxr6_1e234172-c7fe-4989-b902-38458b50674f/run-os-openstack-openstack-cell1/0.log"
Nov 26 10:19:22 crc kubenswrapper[4940]: I1126 10:19:22.084592 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-networker-csmg9_c228e8ac-e24b-4d47-a741-e29048110c53/run-os-openstack-openstack-networker/0.log"
Nov 26 10:19:22 crc kubenswrapper[4940]: I1126 10:19:22.190115 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-openstack-452hj_621c07aa-99e8-4ecb-ae8f-bcff3ac22e4e/ssh-known-hosts-openstack/0.log"
Nov 26 10:19:22 crc kubenswrapper[4940]: I1126 10:19:22.311673 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-openstack-openstack-cell1-8qd7v_9fc977d5-5151-447c-8006-0a318ab3b23e/telemetry-openstack-openstack-cell1/0.log"
Nov 26 10:19:22 crc kubenswrapper[4940]: I1126 10:19:22.505528 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_85a497f3-6d48-4234-92ac-98a55aa14977/tempest-tests-tempest-tests-runner/0.log"
Nov 26 10:19:22 crc kubenswrapper[4940]: I1126 10:19:22.534325 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_a444a31f-737b-4340-821b-f639f2fe76bb/test-operator-logs-container/0.log"
Nov 26 10:19:22 crc kubenswrapper[4940]: I1126 10:19:22.801801 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-hp2w2_47b63e78-a365-4aef-85a8-4ecd8fb825a8/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log"
Nov 26 10:19:22 crc kubenswrapper[4940]: I1126 10:19:22.837970 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-networker-zz8rv_b6093962-1e67-478a-a37c-8d21eeb86636/tripleo-cleanup-tripleo-cleanup-openstack-networker/0.log"
Nov 26 10:19:23 crc kubenswrapper[4940]: I1126 10:19:23.025571 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-x7s87_1fc41899-ad48-4759-8e0b-0f9108597ca3/validate-network-openstack-openstack-cell1/0.log"
Nov 26 10:19:23 crc kubenswrapper[4940]: I1126 10:19:23.059339 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-networker-gbqp4_89f56311-2eb5-4dc4-8fb6-137d98ae1c48/validate-network-openstack-openstack-networker/0.log"
Nov 26 10:19:36 crc kubenswrapper[4940]: I1126 10:19:36.634613 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_59fdd492-69d6-4325-9c7f-ed4622d6797b/memcached/0.log"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.227887 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6rn9q"]
Nov 26 10:19:42 crc kubenswrapper[4940]: E1126 10:19:42.229199 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerName="extract-content"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.229223 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerName="extract-content"
Nov 26 10:19:42 crc kubenswrapper[4940]: E1126 10:19:42.229281 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerName="registry-server"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.229292 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerName="registry-server"
Nov 26 10:19:42 crc kubenswrapper[4940]: E1126 10:19:42.229317 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerName="extract-utilities"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.229328 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerName="extract-utilities"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.229649 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="69f95805-2d82-4b28-9d02-b187f12eefe5" containerName="registry-server"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.232363 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.246606 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rn9q"]
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.322473 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd9jn\" (UniqueName: \"kubernetes.io/projected/8728a639-bbf1-41f9-ad28-cd826af3ce78-kube-api-access-hd9jn\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.322545 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-utilities\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.322633 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-catalog-content\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.424291 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd9jn\" (UniqueName: \"kubernetes.io/projected/8728a639-bbf1-41f9-ad28-cd826af3ce78-kube-api-access-hd9jn\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.424349 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-utilities\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.424419 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-catalog-content\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.424907 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-catalog-content\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.424994 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-utilities\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.442033 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd9jn\" (UniqueName: \"kubernetes.io/projected/8728a639-bbf1-41f9-ad28-cd826af3ce78-kube-api-access-hd9jn\") pod \"redhat-marketplace-6rn9q\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") " pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:42 crc kubenswrapper[4940]: I1126 10:19:42.555942 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:43 crc kubenswrapper[4940]: I1126 10:19:43.027210 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rn9q"]
Nov 26 10:19:43 crc kubenswrapper[4940]: I1126 10:19:43.632729 4940 generic.go:334] "Generic (PLEG): container finished" podID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerID="aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025" exitCode=0
Nov 26 10:19:43 crc kubenswrapper[4940]: I1126 10:19:43.632809 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rn9q" event={"ID":"8728a639-bbf1-41f9-ad28-cd826af3ce78","Type":"ContainerDied","Data":"aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025"}
Nov 26 10:19:43 crc kubenswrapper[4940]: I1126 10:19:43.633119 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rn9q" event={"ID":"8728a639-bbf1-41f9-ad28-cd826af3ce78","Type":"ContainerStarted","Data":"9405a5cc3ad3ee2d30bff82acd07ff20bbdc23a7e2078733e07f1ccc979223e4"}
Nov 26 10:19:43 crc kubenswrapper[4940]: I1126 10:19:43.635212 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 10:19:44 crc kubenswrapper[4940]: I1126 10:19:44.645196 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rn9q" event={"ID":"8728a639-bbf1-41f9-ad28-cd826af3ce78","Type":"ContainerStarted","Data":"1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73"}
Nov 26 10:19:45 crc kubenswrapper[4940]: I1126 10:19:45.662112 4940 generic.go:334] "Generic (PLEG): container finished" podID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerID="1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73" exitCode=0
Nov 26 10:19:45 crc kubenswrapper[4940]: I1126 10:19:45.662192 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rn9q" event={"ID":"8728a639-bbf1-41f9-ad28-cd826af3ce78","Type":"ContainerDied","Data":"1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73"}
Nov 26 10:19:46 crc kubenswrapper[4940]: I1126 10:19:46.673976 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rn9q" event={"ID":"8728a639-bbf1-41f9-ad28-cd826af3ce78","Type":"ContainerStarted","Data":"c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29"}
Nov 26 10:19:46 crc kubenswrapper[4940]: I1126 10:19:46.692483 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6rn9q" podStartSLOduration=2.1774196630000002 podStartE2EDuration="4.692466683s" podCreationTimestamp="2025-11-26 10:19:42 +0000 UTC" firstStartedPulling="2025-11-26 10:19:43.634933749 +0000 UTC m=+12285.155075368" lastFinishedPulling="2025-11-26 10:19:46.149980759 +0000 UTC m=+12287.670122388" observedRunningTime="2025-11-26 10:19:46.690797641 +0000 UTC m=+12288.210939280" watchObservedRunningTime="2025-11-26 10:19:46.692466683 +0000 UTC m=+12288.212608302"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.006438 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k_fcf304eb-07f5-4db8-b8fa-a7b71a11b1be/util/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.175634 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k_fcf304eb-07f5-4db8-b8fa-a7b71a11b1be/pull/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.201241 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k_fcf304eb-07f5-4db8-b8fa-a7b71a11b1be/pull/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.208394 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k_fcf304eb-07f5-4db8-b8fa-a7b71a11b1be/util/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.375132 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k_fcf304eb-07f5-4db8-b8fa-a7b71a11b1be/util/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.376463 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k_fcf304eb-07f5-4db8-b8fa-a7b71a11b1be/pull/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.429095 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006vr54k_fcf304eb-07f5-4db8-b8fa-a7b71a11b1be/extract/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.590828 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hxbtm_86296495-65bc-46a3-a775-621a7bf1745f/kube-rbac-proxy/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.629958 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hxbtm_86296495-65bc-46a3-a775-621a7bf1745f/manager/2.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.653097 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-hxbtm_86296495-65bc-46a3-a775-621a7bf1745f/manager/1.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.784535 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-d75td_7ce6057b-0d67-48fc-9d34-b6574eda6978/kube-rbac-proxy/0.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.820738 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-d75td_7ce6057b-0d67-48fc-9d34-b6574eda6978/manager/1.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.834303 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-d75td_7ce6057b-0d67-48fc-9d34-b6574eda6978/manager/2.log"
Nov 26 10:19:48 crc kubenswrapper[4940]: I1126 10:19:48.976954 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-6qpkm_9527c833-8bce-440b-b4e5-ca0a08ef7d28/kube-rbac-proxy/0.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.010960 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-6qpkm_9527c833-8bce-440b-b4e5-ca0a08ef7d28/manager/2.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.062732 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-6qpkm_9527c833-8bce-440b-b4e5-ca0a08ef7d28/manager/1.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.145818 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-55vwf_327827f9-8ca3-4d2e-8478-ace9eb784b21/kube-rbac-proxy/0.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.260259 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-55vwf_327827f9-8ca3-4d2e-8478-ace9eb784b21/manager/2.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.295875 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-55vwf_327827f9-8ca3-4d2e-8478-ace9eb784b21/manager/1.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.352882 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-s2cvl_51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5/kube-rbac-proxy/0.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.458749 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-s2cvl_51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5/manager/2.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.490153 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-s2cvl_51d7a0fe-d1f6-486e-bf6f-c1b89289d0f5/manager/1.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.546091 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-97dsx_b53f82af-849a-47b4-a878-676055ad11ef/kube-rbac-proxy/0.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.684174 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-97dsx_b53f82af-849a-47b4-a878-676055ad11ef/manager/2.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.712566 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-97dsx_b53f82af-849a-47b4-a878-676055ad11ef/manager/1.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.758514 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-txjbc_b805b33b-94ee-4037-907b-339573471ddb/kube-rbac-proxy/0.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.917512 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-txjbc_b805b33b-94ee-4037-907b-339573471ddb/manager/1.log"
Nov 26 10:19:49 crc kubenswrapper[4940]: I1126 10:19:49.962944 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-ht6zr_0eca9bcc-5909-48e2-927e-b059359977d5/kube-rbac-proxy/0.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.033856 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-txjbc_b805b33b-94ee-4037-907b-339573471ddb/manager/2.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.120965 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-ht6zr_0eca9bcc-5909-48e2-927e-b059359977d5/manager/3.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.166443 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-ht6zr_0eca9bcc-5909-48e2-927e-b059359977d5/manager/2.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.235839 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-xbbg2_e4255a56-ed59-4cad-90a4-91abb39144d4/kube-rbac-proxy/0.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.361206 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-xbbg2_e4255a56-ed59-4cad-90a4-91abb39144d4/manager/1.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.365827 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-xbbg2_e4255a56-ed59-4cad-90a4-91abb39144d4/manager/2.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.447698 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-qsrc5_68cd61b8-efae-4aef-bd7a-3e90201b5809/kube-rbac-proxy/0.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.577643 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-qsrc5_68cd61b8-efae-4aef-bd7a-3e90201b5809/manager/1.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.607571 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-qsrc5_68cd61b8-efae-4aef-bd7a-3e90201b5809/manager/2.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.638834 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-qwlpn_e46c7b1d-e02f-4807-a650-1038eba64162/kube-rbac-proxy/0.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.779560 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-qwlpn_e46c7b1d-e02f-4807-a650-1038eba64162/manager/2.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.881840 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-qwlpn_e46c7b1d-e02f-4807-a650-1038eba64162/manager/1.log"
Nov 26 10:19:50 crc kubenswrapper[4940]: I1126 10:19:50.885474 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-x9ffm_2b2e7f46-8ad4-4361-8e95-76aa1e091665/kube-rbac-proxy/0.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.034125 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-x9ffm_2b2e7f46-8ad4-4361-8e95-76aa1e091665/manager/2.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.148693 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-x9ffm_2b2e7f46-8ad4-4361-8e95-76aa1e091665/manager/1.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.165705 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-lzctw_5c068c7e-f13c-45ca-b161-e590eefdd568/kube-rbac-proxy/0.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.306060 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-lzctw_5c068c7e-f13c-45ca-b161-e590eefdd568/manager/1.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.358642 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-lzctw_5c068c7e-f13c-45ca-b161-e590eefdd568/manager/2.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.424270 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-5p8hf_15d11cf9-51e8-4f1e-880e-86d9bba60224/kube-rbac-proxy/0.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.542753 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-5p8hf_15d11cf9-51e8-4f1e-880e-86d9bba60224/manager/3.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.560241 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-5p8hf_15d11cf9-51e8-4f1e-880e-86d9bba60224/manager/2.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.560919 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp_869e95c8-91e8-4b5a-8eda-35c045ee8cbe/kube-rbac-proxy/0.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.684578 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp_869e95c8-91e8-4b5a-8eda-35c045ee8cbe/manager/1.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.811974 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-qr5vp_869e95c8-91e8-4b5a-8eda-35c045ee8cbe/manager/0.log"
Nov 26 10:19:51 crc kubenswrapper[4940]: I1126 10:19:51.990017 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56868586f6-7dkrp_df395369-43ff-4cd2-af6e-60a9a96a4d66/manager/2.log"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.084949 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5675dd9766-49nb9_192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea/operator/1.log"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.388686 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5675dd9766-49nb9_192bcc82-5b25-4cc3-8a03-a9b7b4b1dfea/operator/0.log"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.557224 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.557259 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.572785 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6j69k_41fd5ecb-5c59-4d84-ae08-d7090bb05b3e/registry-server/0.log"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.625756 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.710377 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jxxwj_e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4/kube-rbac-proxy/0.log"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.797874 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jxxwj_e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4/manager/2.log"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.799453 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.889233 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rn9q"]
Nov 26 10:19:52 crc kubenswrapper[4940]: I1126 10:19:52.963281 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-2ddr6_7756325b-5cc4-4eb6-ae14-5f71924c3413/kube-rbac-proxy/0.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.014113 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jxxwj_e4762939-1eeb-43ee-b3a7-6ac7f4eaa6d4/manager/1.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.067515 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-2ddr6_7756325b-5cc4-4eb6-ae14-5f71924c3413/manager/2.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.149573 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-2ddr6_7756325b-5cc4-4eb6-ae14-5f71924c3413/manager/1.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.233241 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-zrxd8_e8c9bb46-a618-437a-914b-6cb9c1ede58c/operator/2.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.275933 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-zrxd8_e8c9bb46-a618-437a-914b-6cb9c1ede58c/operator/1.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.487718 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-nwpjs_5ac0ef91-42dc-4bed-b5bc-4c668b3249cc/kube-rbac-proxy/0.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.528980 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-nwpjs_5ac0ef91-42dc-4bed-b5bc-4c668b3249cc/manager/1.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.542956 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-nwpjs_5ac0ef91-42dc-4bed-b5bc-4c668b3249cc/manager/2.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.543877 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56868586f6-7dkrp_df395369-43ff-4cd2-af6e-60a9a96a4d66/manager/3.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.682931 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-jgp88_7fcb4d96-f7a7-4ead-a820-db2eb2785a87/kube-rbac-proxy/0.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.717311 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-jgp88_7fcb4d96-f7a7-4ead-a820-db2eb2785a87/manager/1.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.825192 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-jgp88_7fcb4d96-f7a7-4ead-a820-db2eb2785a87/manager/2.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.862776 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-4j248_53456206-67c0-4503-b72f-909a3ec07b2a/kube-rbac-proxy/0.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.873219 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-4j248_53456206-67c0-4503-b72f-909a3ec07b2a/manager/1.log"
Nov 26 10:19:53 crc kubenswrapper[4940]: I1126 10:19:53.957382 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-4j248_53456206-67c0-4503-b72f-909a3ec07b2a/manager/0.log"
Nov 26 10:19:54 crc kubenswrapper[4940]: I1126 10:19:54.026869 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-vcr8t_67bde2c7-9e64-469e-b400-071b32f065da/kube-rbac-proxy/0.log"
Nov 26 10:19:54 crc kubenswrapper[4940]: I1126 10:19:54.049390 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-vcr8t_67bde2c7-9e64-469e-b400-071b32f065da/manager/2.log"
Nov 26 10:19:54 crc kubenswrapper[4940]: I1126 10:19:54.097725 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-vcr8t_67bde2c7-9e64-469e-b400-071b32f065da/manager/1.log"
Nov 26 10:19:54 crc kubenswrapper[4940]: I1126 10:19:54.763707 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6rn9q" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerName="registry-server" containerID="cri-o://c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29" gracePeriod=2
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.303224 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.411247 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hd9jn\" (UniqueName: \"kubernetes.io/projected/8728a639-bbf1-41f9-ad28-cd826af3ce78-kube-api-access-hd9jn\") pod \"8728a639-bbf1-41f9-ad28-cd826af3ce78\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") "
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.411640 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-utilities\") pod \"8728a639-bbf1-41f9-ad28-cd826af3ce78\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") "
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.411748 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-catalog-content\") pod \"8728a639-bbf1-41f9-ad28-cd826af3ce78\" (UID: \"8728a639-bbf1-41f9-ad28-cd826af3ce78\") "
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.412223 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-utilities" (OuterVolumeSpecName: "utilities") pod "8728a639-bbf1-41f9-ad28-cd826af3ce78" (UID: "8728a639-bbf1-41f9-ad28-cd826af3ce78"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.417843 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8728a639-bbf1-41f9-ad28-cd826af3ce78-kube-api-access-hd9jn" (OuterVolumeSpecName: "kube-api-access-hd9jn") pod "8728a639-bbf1-41f9-ad28-cd826af3ce78" (UID: "8728a639-bbf1-41f9-ad28-cd826af3ce78"). InnerVolumeSpecName "kube-api-access-hd9jn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.428611 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8728a639-bbf1-41f9-ad28-cd826af3ce78" (UID: "8728a639-bbf1-41f9-ad28-cd826af3ce78"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.514116 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hd9jn\" (UniqueName: \"kubernetes.io/projected/8728a639-bbf1-41f9-ad28-cd826af3ce78-kube-api-access-hd9jn\") on node \"crc\" DevicePath \"\""
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.514169 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.514181 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8728a639-bbf1-41f9-ad28-cd826af3ce78-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.777195 4940 generic.go:334] "Generic (PLEG): container finished" podID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerID="c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29" exitCode=0
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.777245 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rn9q" event={"ID":"8728a639-bbf1-41f9-ad28-cd826af3ce78","Type":"ContainerDied","Data":"c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29"}
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.777274 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6rn9q" event={"ID":"8728a639-bbf1-41f9-ad28-cd826af3ce78","Type":"ContainerDied","Data":"9405a5cc3ad3ee2d30bff82acd07ff20bbdc23a7e2078733e07f1ccc979223e4"}
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.777295 4940 scope.go:117] "RemoveContainer" containerID="c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.777458 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6rn9q"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.808182 4940 scope.go:117] "RemoveContainer" containerID="1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.824443 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rn9q"]
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.838357 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6rn9q"]
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.856903 4940 scope.go:117] "RemoveContainer" containerID="aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.888431 4940 scope.go:117] "RemoveContainer" containerID="c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29"
Nov 26 10:19:55 crc kubenswrapper[4940]: E1126 10:19:55.888930 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29\": container with ID starting with c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29 not found: ID does not exist" containerID="c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.888989 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29"} err="failed to get container status \"c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29\": rpc error: code = NotFound desc = could not find container \"c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29\": container with ID starting with c68dec2e2223f48ce3f1c67bb0e6205c52d3577d0c56a4fa2d47b3e4da405f29 not found: ID does not exist"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.889035 4940 scope.go:117] "RemoveContainer" containerID="1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73"
Nov 26 10:19:55 crc kubenswrapper[4940]: E1126 10:19:55.889613 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73\": container with ID starting with 1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73 not found: ID does not exist" containerID="1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.889644 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73"} err="failed to get container status \"1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73\": rpc error: code = NotFound desc = could not find container \"1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73\": container with ID starting with 1855bfdc1229f737770568f31b4d248cb7d8ec59171fa4993a805a003c5b9e73 not found: ID does not exist"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.889667 4940 scope.go:117] "RemoveContainer" containerID="aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025"
Nov 26 10:19:55 crc kubenswrapper[4940]: E1126 10:19:55.889964 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025\": container with ID starting with aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025 not found: ID does not exist" containerID="aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025"
Nov 26 10:19:55 crc kubenswrapper[4940]: I1126 10:19:55.889987 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025"} err="failed to get container status \"aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025\": rpc error: code = NotFound desc = could not find container \"aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025\": container with ID starting with aeee81f39ec7b5731a8fe3f1add677946b1b0c10c776434ab39e673a8b821025 not found: ID does not exist"
Nov 26 10:19:57 crc kubenswrapper[4940]: I1126 10:19:57.175709 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" path="/var/lib/kubelet/pods/8728a639-bbf1-41f9-ad28-cd826af3ce78/volumes"
Nov 26 10:20:15 crc kubenswrapper[4940]: I1126 10:20:15.383825 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-gt48v_2d74f8fa-d746-4920-ac46-e6a4a7a501f6/control-plane-machine-set-operator/0.log"
Nov 26 10:20:15 crc kubenswrapper[4940]: I1126 10:20:15.540423 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-gwrr8_40c806a8-67f5-497d-91d8-6ce10f60e79a/kube-rbac-proxy/0.log"
Nov 26 10:20:15 crc kubenswrapper[4940]: I1126 10:20:15.605429 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-gwrr8_40c806a8-67f5-497d-91d8-6ce10f60e79a/machine-api-operator/0.log"
Nov 26 10:20:30 crc kubenswrapper[4940]: I1126 10:20:30.312997 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-6tzhl_6a36e7f5-47fa-4fcf-ad99-5f538e358254/cert-manager-controller/0.log"
Nov 26 10:20:30 crc kubenswrapper[4940]: I1126 10:20:30.485775 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-pjqx6_3632747e-e8d6-4971-a5bf-d07117d69ae7/cert-manager-cainjector/1.log"
Nov 26 10:20:30 crc kubenswrapper[4940]: I1126 10:20:30.524579 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-pjqx6_3632747e-e8d6-4971-a5bf-d07117d69ae7/cert-manager-cainjector/0.log"
Nov 26 10:20:30 crc kubenswrapper[4940]: I1126 10:20:30.542966 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-7tbl5_1bdea5c1-9b04-44ee-8e41-2cb739ab9ff5/cert-manager-webhook/0.log"
Nov 26 10:20:44 crc kubenswrapper[4940]: I1126 10:20:44.854243 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-m99d2_ee144795-d94d-4e4a-8e98-6e9d554513f9/nmstate-console-plugin/0.log"
Nov 26 10:20:45 crc kubenswrapper[4940]: I1126 10:20:45.010551 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-w7vjz_4e1ada7b-3473-46da-bae3-457ac931f202/nmstate-handler/0.log"
Nov 26 10:20:45 crc kubenswrapper[4940]: I1126 10:20:45.071949 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-2djsj_9dffd2d7-450f-4452-8aac-29ca0dae27b2/kube-rbac-proxy/0.log"
Nov 26 10:20:45 crc kubenswrapper[4940]: I1126 10:20:45.117359 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-2djsj_9dffd2d7-450f-4452-8aac-29ca0dae27b2/nmstate-metrics/0.log"
Nov 26 10:20:45 crc kubenswrapper[4940]: I1126 10:20:45.240581 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-5fsff_8200355b-df22-4b1d-8cd2-578662a58762/nmstate-operator/0.log"
Nov 26 10:20:45 crc kubenswrapper[4940]: I1126 10:20:45.292217 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-ll7sq_0acfba00-a78f-4f07-9273-dbd13ea957db/nmstate-webhook/0.log"
Nov 26 10:20:51 crc kubenswrapper[4940]: I1126 10:20:51.728083 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 10:20:51 crc kubenswrapper[4940]: I1126 10:20:51.728548 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.141131 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-lphz8_faeaf9d9-6067-4ecd-b240-4909087180dc/kube-rbac-proxy/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.334738 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-frr-files/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.511655 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-lphz8_faeaf9d9-6067-4ecd-b240-4909087180dc/controller/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.571013 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-frr-files/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.596707 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-metrics/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.598360 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-reloader/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.665987 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-reloader/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.864900 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-metrics/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.875232 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-reloader/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.875917 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-metrics/0.log"
Nov 26 10:21:01 crc kubenswrapper[4940]: I1126 10:21:01.885832 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-frr-files/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.042215 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-metrics/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.042258 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-reloader/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.049423 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/cp-frr-files/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.091761 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/controller/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.218830 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/frr-metrics/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.256487 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/kube-rbac-proxy/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.288537 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/kube-rbac-proxy-frr/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.436246 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/reloader/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.489226 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-lwp6c_09a94cb0-e4d0-4945-9f9e-9912f64ee105/frr-k8s-webhook-server/0.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.652017 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6f8d6cc986-tlmk2_4415b953-7e66-4d84-acde-32474c6d0ebf/manager/3.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.732676 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6f8d6cc986-tlmk2_4415b953-7e66-4d84-acde-32474c6d0ebf/manager/2.log"
Nov 26 10:21:02 crc kubenswrapper[4940]: I1126 10:21:02.865922 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-57fd5d9966-flsrs_9238138e-b446-46e2-81b3-802bacc8e544/webhook-server/0.log"
Nov 26 10:21:03 crc kubenswrapper[4940]: I1126 10:21:03.040909 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-4rnjg_1679bb4d-d17d-4e92-b72a-55e5927bedd6/kube-rbac-proxy/0.log"
Nov 26 10:21:03 crc kubenswrapper[4940]: I1126 10:21:03.909705 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-4rnjg_1679bb4d-d17d-4e92-b72a-55e5927bedd6/speaker/0.log"
Nov 26 10:21:05 crc kubenswrapper[4940]: I1126 10:21:05.948492 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jxkgj_8c4f4ea4-d726-4477-a94e-d23464e00e6a/frr/0.log"
Nov 26 10:21:17 crc kubenswrapper[4940]: I1126 10:21:17.685004 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m_737d785a-247d-4d8f-ba8d-d2488bf741c3/util/0.log"
Nov 26 10:21:17 crc kubenswrapper[4940]: I1126 10:21:17.866450 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m_737d785a-247d-4d8f-ba8d-d2488bf741c3/pull/0.log"
Nov 26 10:21:17 crc kubenswrapper[4940]: I1126 10:21:17.869890 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m_737d785a-247d-4d8f-ba8d-d2488bf741c3/util/0.log"
Nov 26 10:21:17 crc kubenswrapper[4940]: I1126 10:21:17.897694 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m_737d785a-247d-4d8f-ba8d-d2488bf741c3/pull/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.041450 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m_737d785a-247d-4d8f-ba8d-d2488bf741c3/pull/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.049457 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m_737d785a-247d-4d8f-ba8d-d2488bf741c3/extract/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.063782 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ahjl6m_737d785a-247d-4d8f-ba8d-d2488bf741c3/util/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.207361 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm_d3a179ca-909c-4b27-a176-e91d5d64399b/util/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.344983 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm_d3a179ca-909c-4b27-a176-e91d5d64399b/util/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.375966 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm_d3a179ca-909c-4b27-a176-e91d5d64399b/pull/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.376249 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm_d3a179ca-909c-4b27-a176-e91d5d64399b/pull/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.576916 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm_d3a179ca-909c-4b27-a176-e91d5d64399b/util/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.590162 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm_d3a179ca-909c-4b27-a176-e91d5d64399b/pull/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.610175 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ej6tvm_d3a179ca-909c-4b27-a176-e91d5d64399b/extract/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.746701 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf_fea8b3a9-1dfd-4026-bd0f-c8940c00d8af/util/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.920262 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf_fea8b3a9-1dfd-4026-bd0f-c8940c00d8af/util/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.920675 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf_fea8b3a9-1dfd-4026-bd0f-c8940c00d8af/pull/0.log"
Nov 26 10:21:18 crc kubenswrapper[4940]: I1126 10:21:18.949876 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf_fea8b3a9-1dfd-4026-bd0f-c8940c00d8af/pull/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.099978 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf_fea8b3a9-1dfd-4026-bd0f-c8940c00d8af/pull/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.134939 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf_fea8b3a9-1dfd-4026-bd0f-c8940c00d8af/util/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.141173 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210d79sf_fea8b3a9-1dfd-4026-bd0f-c8940c00d8af/extract/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.309543 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ghgss_2600a6f3-fe01-4cca-9e56-a8981611d6e9/extract-utilities/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.437973 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ghgss_2600a6f3-fe01-4cca-9e56-a8981611d6e9/extract-utilities/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.438144 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ghgss_2600a6f3-fe01-4cca-9e56-a8981611d6e9/extract-content/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.464816 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ghgss_2600a6f3-fe01-4cca-9e56-a8981611d6e9/extract-content/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.635761 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ghgss_2600a6f3-fe01-4cca-9e56-a8981611d6e9/extract-utilities/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.639535 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ghgss_2600a6f3-fe01-4cca-9e56-a8981611d6e9/extract-content/0.log"
Nov 26 10:21:19 crc kubenswrapper[4940]: I1126 10:21:19.828227 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lrt8v_199be261-efb1-49e3-8e6e-f2237eafc202/extract-utilities/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.094463 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lrt8v_199be261-efb1-49e3-8e6e-f2237eafc202/extract-utilities/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.098811 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lrt8v_199be261-efb1-49e3-8e6e-f2237eafc202/extract-content/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.178550 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lrt8v_199be261-efb1-49e3-8e6e-f2237eafc202/extract-content/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.305119 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lrt8v_199be261-efb1-49e3-8e6e-f2237eafc202/extract-content/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.337200 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lrt8v_199be261-efb1-49e3-8e6e-f2237eafc202/extract-utilities/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.628782 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn_a177e713-0abb-486a-bd7f-cc80ea4a762e/util/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.783207 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn_a177e713-0abb-486a-bd7f-cc80ea4a762e/pull/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.808895 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn_a177e713-0abb-486a-bd7f-cc80ea4a762e/util/0.log"
Nov 26 10:21:20 crc kubenswrapper[4940]: I1126 10:21:20.905233 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn_a177e713-0abb-486a-bd7f-cc80ea4a762e/pull/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.154477 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn_a177e713-0abb-486a-bd7f-cc80ea4a762e/extract/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.161694 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn_a177e713-0abb-486a-bd7f-cc80ea4a762e/pull/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.192134 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6m67kn_a177e713-0abb-486a-bd7f-cc80ea4a762e/util/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.357791 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-r5797_de960209-97f8-4192-8dce-db459972eede/marketplace-operator/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.430354 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ghgss_2600a6f3-fe01-4cca-9e56-a8981611d6e9/registry-server/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.537699 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpxgr_ee57acbd-ac45-48fa-9ced-dbf75469d3db/extract-utilities/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.728126 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.729328 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.736990 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpxgr_ee57acbd-ac45-48fa-9ced-dbf75469d3db/extract-utilities/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.756218 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpxgr_ee57acbd-ac45-48fa-9ced-dbf75469d3db/extract-content/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.779277 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpxgr_ee57acbd-ac45-48fa-9ced-dbf75469d3db/extract-content/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.936528 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpxgr_ee57acbd-ac45-48fa-9ced-dbf75469d3db/extract-utilities/0.log"
Nov 26 10:21:21 crc kubenswrapper[4940]: I1126 10:21:21.964478 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpxgr_ee57acbd-ac45-48fa-9ced-dbf75469d3db/extract-content/0.log"
Nov 26 10:21:22 crc kubenswrapper[4940]: I1126 10:21:22.189559 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v2ldj_49e769a8-5ba1-466e-bdbb-01367e025ad1/extract-utilities/0.log"
Nov 26 10:21:22 crc kubenswrapper[4940]: I1126 10:21:22.367328 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v2ldj_49e769a8-5ba1-466e-bdbb-01367e025ad1/extract-utilities/0.log"
Nov 26 10:21:22 crc kubenswrapper[4940]: I1126 10:21:22.419691 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v2ldj_49e769a8-5ba1-466e-bdbb-01367e025ad1/extract-content/0.log"
Nov 26 10:21:22 crc kubenswrapper[4940]: I1126 10:21:22.462076 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v2ldj_49e769a8-5ba1-466e-bdbb-01367e025ad1/extract-content/0.log"
Nov 26 10:21:22 crc kubenswrapper[4940]: I1126 10:21:22.571444 4940 log.go:25]
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpxgr_ee57acbd-ac45-48fa-9ced-dbf75469d3db/registry-server/0.log" Nov 26 10:21:22 crc kubenswrapper[4940]: I1126 10:21:22.644397 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-lrt8v_199be261-efb1-49e3-8e6e-f2237eafc202/registry-server/0.log" Nov 26 10:21:22 crc kubenswrapper[4940]: I1126 10:21:22.648971 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v2ldj_49e769a8-5ba1-466e-bdbb-01367e025ad1/extract-content/0.log" Nov 26 10:21:22 crc kubenswrapper[4940]: I1126 10:21:22.690565 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v2ldj_49e769a8-5ba1-466e-bdbb-01367e025ad1/extract-utilities/0.log" Nov 26 10:21:24 crc kubenswrapper[4940]: I1126 10:21:24.163386 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v2ldj_49e769a8-5ba1-466e-bdbb-01367e025ad1/registry-server/0.log" Nov 26 10:21:36 crc kubenswrapper[4940]: I1126 10:21:36.885568 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-6dwh7_9d602305-12d4-40ac-a76d-4cb2dc13381f/prometheus-operator/0.log" Nov 26 10:21:37 crc kubenswrapper[4940]: I1126 10:21:37.039570 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6d66954595-b5b64_760eb996-e2fd-47b9-a005-280453befb3f/prometheus-operator-admission-webhook/0.log" Nov 26 10:21:37 crc kubenswrapper[4940]: I1126 10:21:37.107347 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6d66954595-r4z7j_dff01c39-1a77-43f8-a784-9ec5dff28a90/prometheus-operator-admission-webhook/0.log" Nov 26 10:21:37 crc kubenswrapper[4940]: I1126 10:21:37.235450 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-tqmf7_7a938eea-838c-424d-9d89-5b5978dfcf79/operator/0.log" Nov 26 10:21:37 crc kubenswrapper[4940]: I1126 10:21:37.286005 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-qvkhc_6bd0a4fb-63e0-4e35-b671-f4648b20cf4e/perses-operator/0.log" Nov 26 10:21:51 crc kubenswrapper[4940]: E1126 10:21:51.226622 4940 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.58:36322->38.102.83.58:46351: write tcp 38.102.83.58:36322->38.102.83.58:46351: write: broken pipe Nov 26 10:21:51 crc kubenswrapper[4940]: I1126 10:21:51.728734 4940 patch_prober.go:28] interesting pod/machine-config-daemon-kbfvm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 10:21:51 crc kubenswrapper[4940]: I1126 10:21:51.729004 4940 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 10:21:51 crc kubenswrapper[4940]: I1126 10:21:51.729059 4940 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" Nov 26 10:21:51 crc kubenswrapper[4940]: I1126 10:21:51.729768 4940 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92"} pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 10:21:51 crc kubenswrapper[4940]: I1126 10:21:51.729821 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerName="machine-config-daemon" containerID="cri-o://ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" gracePeriod=600 Nov 26 10:21:51 crc kubenswrapper[4940]: E1126 10:21:51.858596 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:21:52 crc kubenswrapper[4940]: I1126 10:21:52.161570 4940 generic.go:334] "Generic (PLEG): container finished" podID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" exitCode=0 Nov 26 10:21:52 crc kubenswrapper[4940]: I1126 10:21:52.161621 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerDied","Data":"ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92"} Nov 26 10:21:52 crc kubenswrapper[4940]: I1126 10:21:52.161657 4940 scope.go:117] "RemoveContainer" containerID="7f0ccb6a3c589d0db29d14f0495ad6e66e731ab6a4c62b694b69cf952d85d5c3" Nov 26 10:21:52 crc kubenswrapper[4940]: I1126 10:21:52.162407 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:21:52 crc kubenswrapper[4940]: E1126 10:21:52.163748 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:22:03 crc kubenswrapper[4940]: I1126 10:22:03.166444 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:22:03 crc kubenswrapper[4940]: E1126 10:22:03.167606 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:22:15 crc kubenswrapper[4940]: I1126 
10:22:15.167218 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:22:15 crc kubenswrapper[4940]: E1126 10:22:15.168288 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:22:28 crc kubenswrapper[4940]: I1126 10:22:28.165978 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:22:28 crc kubenswrapper[4940]: E1126 10:22:28.166826 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:22:42 crc kubenswrapper[4940]: I1126 10:22:42.165974 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:22:42 crc kubenswrapper[4940]: E1126 10:22:42.167089 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:22:55 crc kubenswrapper[4940]: I1126 10:22:55.166783 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:22:55 crc kubenswrapper[4940]: E1126 10:22:55.167910 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:23:08 crc kubenswrapper[4940]: I1126 10:23:08.166447 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:23:08 crc kubenswrapper[4940]: E1126 10:23:08.167541 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:23:19 crc kubenswrapper[4940]: I1126 10:23:19.177715 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:23:19 crc kubenswrapper[4940]: E1126 10:23:19.180068 
4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:23:32 crc kubenswrapper[4940]: I1126 10:23:32.165885 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:23:32 crc kubenswrapper[4940]: E1126 10:23:32.166746 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:23:43 crc kubenswrapper[4940]: I1126 10:23:43.166411 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:23:43 crc kubenswrapper[4940]: E1126 10:23:43.167121 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:23:56 crc kubenswrapper[4940]: I1126 10:23:56.167772 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:23:56 crc kubenswrapper[4940]: E1126 10:23:56.169345 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:24:11 crc kubenswrapper[4940]: I1126 10:24:11.166374 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:24:11 crc kubenswrapper[4940]: E1126 10:24:11.167114 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:24:22 crc kubenswrapper[4940]: I1126 10:24:22.165622 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:24:22 crc kubenswrapper[4940]: E1126 10:24:22.166377 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:24:37 crc kubenswrapper[4940]: I1126 10:24:37.165845 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:24:37 crc kubenswrapper[4940]: E1126 10:24:37.166719 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:24:52 crc kubenswrapper[4940]: I1126 10:24:52.165994 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:24:52 crc kubenswrapper[4940]: E1126 10:24:52.166887 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:25:05 crc kubenswrapper[4940]: I1126 10:25:05.166577 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:25:05 crc kubenswrapper[4940]: E1126 10:25:05.167651 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:25:18 crc kubenswrapper[4940]: I1126 10:25:18.066812 4940 generic.go:334] "Generic (PLEG): container finished" podID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerID="db4d3fc42c5c8da822130bf6930eccdc10d25e447d774a7fab6d6cd3d4f5ae72" exitCode=0 Nov 26 10:25:18 crc kubenswrapper[4940]: I1126 10:25:18.066872 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" event={"ID":"ec6431e5-59f3-42f0-af2b-0672303ec132","Type":"ContainerDied","Data":"db4d3fc42c5c8da822130bf6930eccdc10d25e447d774a7fab6d6cd3d4f5ae72"} Nov 26 10:25:18 crc kubenswrapper[4940]: I1126 10:25:18.068288 4940 scope.go:117] "RemoveContainer" containerID="db4d3fc42c5c8da822130bf6930eccdc10d25e447d774a7fab6d6cd3d4f5ae72" Nov 26 10:25:18 crc kubenswrapper[4940]: I1126 10:25:18.691894 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sd2bd_must-gather-g6wxz_ec6431e5-59f3-42f0-af2b-0672303ec132/gather/0.log" Nov 26 10:25:19 crc kubenswrapper[4940]: I1126 10:25:19.173568 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:25:19 crc kubenswrapper[4940]: E1126 10:25:19.173878 4940 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:25:29 crc kubenswrapper[4940]: I1126 10:25:29.927142 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sd2bd/must-gather-g6wxz"] Nov 26 10:25:29 crc kubenswrapper[4940]: I1126 10:25:29.928021 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" podUID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerName="copy" containerID="cri-o://6430d60207abc4a4b59d30dec5087661c804e4209c70bad8a4b2206963ca5f45" gracePeriod=2 Nov 26 10:25:29 crc kubenswrapper[4940]: I1126 10:25:29.938218 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sd2bd/must-gather-g6wxz"] Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.219712 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sd2bd_must-gather-g6wxz_ec6431e5-59f3-42f0-af2b-0672303ec132/copy/0.log" Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.220158 4940 generic.go:334] "Generic (PLEG): container finished" podID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerID="6430d60207abc4a4b59d30dec5087661c804e4209c70bad8a4b2206963ca5f45" exitCode=143 Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.394880 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sd2bd_must-gather-g6wxz_ec6431e5-59f3-42f0-af2b-0672303ec132/copy/0.log" Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.395486 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.446433 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bj7k\" (UniqueName: \"kubernetes.io/projected/ec6431e5-59f3-42f0-af2b-0672303ec132-kube-api-access-2bj7k\") pod \"ec6431e5-59f3-42f0-af2b-0672303ec132\" (UID: \"ec6431e5-59f3-42f0-af2b-0672303ec132\") " Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.446574 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ec6431e5-59f3-42f0-af2b-0672303ec132-must-gather-output\") pod \"ec6431e5-59f3-42f0-af2b-0672303ec132\" (UID: \"ec6431e5-59f3-42f0-af2b-0672303ec132\") " Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.454196 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec6431e5-59f3-42f0-af2b-0672303ec132-kube-api-access-2bj7k" (OuterVolumeSpecName: "kube-api-access-2bj7k") pod "ec6431e5-59f3-42f0-af2b-0672303ec132" (UID: "ec6431e5-59f3-42f0-af2b-0672303ec132"). InnerVolumeSpecName "kube-api-access-2bj7k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.555074 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bj7k\" (UniqueName: \"kubernetes.io/projected/ec6431e5-59f3-42f0-af2b-0672303ec132-kube-api-access-2bj7k\") on node \"crc\" DevicePath \"\"" Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.733901 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec6431e5-59f3-42f0-af2b-0672303ec132-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "ec6431e5-59f3-42f0-af2b-0672303ec132" (UID: "ec6431e5-59f3-42f0-af2b-0672303ec132"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:25:30 crc kubenswrapper[4940]: I1126 10:25:30.759870 4940 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ec6431e5-59f3-42f0-af2b-0672303ec132-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 26 10:25:31 crc kubenswrapper[4940]: I1126 10:25:31.165778 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:25:31 crc kubenswrapper[4940]: E1126 10:25:31.166343 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:25:31 crc kubenswrapper[4940]: I1126 10:25:31.176634 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec6431e5-59f3-42f0-af2b-0672303ec132" path="/var/lib/kubelet/pods/ec6431e5-59f3-42f0-af2b-0672303ec132/volumes" Nov 26 10:25:31 crc kubenswrapper[4940]: I1126 10:25:31.233506 4940 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sd2bd_must-gather-g6wxz_ec6431e5-59f3-42f0-af2b-0672303ec132/copy/0.log" Nov 26 10:25:31 crc kubenswrapper[4940]: I1126 10:25:31.233933 4940 scope.go:117] "RemoveContainer" containerID="6430d60207abc4a4b59d30dec5087661c804e4209c70bad8a4b2206963ca5f45" Nov 26 10:25:31 crc kubenswrapper[4940]: I1126 10:25:31.234013 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sd2bd/must-gather-g6wxz" Nov 26 10:25:31 crc kubenswrapper[4940]: I1126 10:25:31.269562 4940 scope.go:117] "RemoveContainer" containerID="db4d3fc42c5c8da822130bf6930eccdc10d25e447d774a7fab6d6cd3d4f5ae72" Nov 26 10:25:45 crc kubenswrapper[4940]: I1126 10:25:45.166629 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:25:45 crc kubenswrapper[4940]: E1126 10:25:45.167314 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:25:57 crc kubenswrapper[4940]: I1126 10:25:57.165679 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:25:57 crc kubenswrapper[4940]: E1126 10:25:57.166619 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:26:12 crc kubenswrapper[4940]: I1126 10:26:12.165965 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:26:12 crc kubenswrapper[4940]: E1126 10:26:12.167180 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:26:27 crc kubenswrapper[4940]: I1126 10:26:27.166480 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:26:27 crc kubenswrapper[4940]: E1126 10:26:27.167825 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.435093 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-h2jmd"] Nov 26 10:26:32 crc kubenswrapper[4940]: E1126 10:26:32.436064 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerName="gather" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.436078 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerName="gather" Nov 26 10:26:32 crc kubenswrapper[4940]: E1126 
10:26:32.436113 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerName="registry-server" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.436121 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerName="registry-server" Nov 26 10:26:32 crc kubenswrapper[4940]: E1126 10:26:32.436143 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerName="copy" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.436151 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerName="copy" Nov 26 10:26:32 crc kubenswrapper[4940]: E1126 10:26:32.436164 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerName="extract-content" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.436172 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerName="extract-content" Nov 26 10:26:32 crc kubenswrapper[4940]: E1126 10:26:32.436186 4940 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerName="extract-utilities" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.436195 4940 state_mem.go:107] "Deleted CPUSet assignment" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerName="extract-utilities" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.436430 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerName="gather" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.436454 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="8728a639-bbf1-41f9-ad28-cd826af3ce78" containerName="registry-server" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.436475 4940 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6431e5-59f3-42f0-af2b-0672303ec132" containerName="copy" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.438287 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.448964 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h2jmd"] Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.492788 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89hkz\" (UniqueName: \"kubernetes.io/projected/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-kube-api-access-89hkz\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.492840 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-catalog-content\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.492864 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-utilities\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.595586 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-utilities\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.596117 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-utilities\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.596122 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89hkz\" (UniqueName: \"kubernetes.io/projected/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-kube-api-access-89hkz\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.596243 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-catalog-content\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.596687 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-catalog-content\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.619060 4940 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-89hkz\" (UniqueName: \"kubernetes.io/projected/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-kube-api-access-89hkz\") pod \"certified-operators-h2jmd\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:32 crc kubenswrapper[4940]: I1126 10:26:32.806111 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:33 crc kubenswrapper[4940]: I1126 10:26:33.349468 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h2jmd"] Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.053403 4940 generic.go:334] "Generic (PLEG): container finished" podID="9bd0dcd2-0d52-4560-81ee-c3ab9fe537be" containerID="aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26" exitCode=0 Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.053514 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h2jmd" event={"ID":"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be","Type":"ContainerDied","Data":"aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26"} Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.053693 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h2jmd" event={"ID":"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be","Type":"ContainerStarted","Data":"a8ff1f7e3a5922a932b70f3ee83ece893d3df54ea057137fdb7402dc0486ab15"} Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.055036 4940 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.195198 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nb72f"] Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.197428 4940 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.230475 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nb72f"] Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.339111 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-utilities\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.339196 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-catalog-content\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.339271 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pbmk\" (UniqueName: \"kubernetes.io/projected/a2e196c0-25f1-4d75-857a-5c2e9324fe65-kube-api-access-2pbmk\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.441642 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-utilities\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.441030 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-utilities\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.442121 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-catalog-content\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.442508 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-catalog-content\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.442601 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pbmk\" (UniqueName: \"kubernetes.io/projected/a2e196c0-25f1-4d75-857a-5c2e9324fe65-kube-api-access-2pbmk\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.464923 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2pbmk\" (UniqueName: \"kubernetes.io/projected/a2e196c0-25f1-4d75-857a-5c2e9324fe65-kube-api-access-2pbmk\") pod \"redhat-operators-nb72f\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.526223 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.801270 4940 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qrmtq"] Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.803873 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.815740 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qrmtq"] Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.849920 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-utilities\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.850049 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgxh2\" (UniqueName: \"kubernetes.io/projected/b372d1b0-c82b-41bc-953b-121df9f17782-kube-api-access-xgxh2\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.850070 4940 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-catalog-content\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.952220 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-utilities\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.952437 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgxh2\" (UniqueName: \"kubernetes.io/projected/b372d1b0-c82b-41bc-953b-121df9f17782-kube-api-access-xgxh2\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.952788 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-utilities\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.952469 4940 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-catalog-content\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.953009 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-catalog-content\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:34 crc kubenswrapper[4940]: I1126 10:26:34.970977 4940 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgxh2\" (UniqueName: \"kubernetes.io/projected/b372d1b0-c82b-41bc-953b-121df9f17782-kube-api-access-xgxh2\") pod \"community-operators-qrmtq\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:35 crc kubenswrapper[4940]: I1126 10:26:35.019201 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nb72f"] Nov 26 10:26:35 crc kubenswrapper[4940]: I1126 10:26:35.065894 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb72f" event={"ID":"a2e196c0-25f1-4d75-857a-5c2e9324fe65","Type":"ContainerStarted","Data":"2e51d0c4b0c66366d370c010b995661c045b3b6b6e066c93c4c0b88e4b73181c"} Nov 26 10:26:35 crc kubenswrapper[4940]: I1126 10:26:35.135470 4940 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:35 crc kubenswrapper[4940]: I1126 10:26:35.638787 4940 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qrmtq"] Nov 26 10:26:36 crc kubenswrapper[4940]: I1126 10:26:36.085200 4940 generic.go:334] "Generic (PLEG): container finished" podID="a2e196c0-25f1-4d75-857a-5c2e9324fe65" containerID="e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0" exitCode=0 Nov 26 10:26:36 crc kubenswrapper[4940]: I1126 10:26:36.085291 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb72f" event={"ID":"a2e196c0-25f1-4d75-857a-5c2e9324fe65","Type":"ContainerDied","Data":"e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0"} Nov 26 10:26:36 crc kubenswrapper[4940]: I1126 10:26:36.090467 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h2jmd" event={"ID":"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be","Type":"ContainerStarted","Data":"0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1"} Nov 26 10:26:36 crc kubenswrapper[4940]: I1126 10:26:36.094254 4940 generic.go:334] "Generic (PLEG): container finished" podID="b372d1b0-c82b-41bc-953b-121df9f17782" containerID="4c51033aa4d4f68952164d3d924145c2f30c66a7aa0c88e81f8eac38c0108e08" exitCode=0 Nov 26 10:26:36 crc kubenswrapper[4940]: I1126 10:26:36.094310 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qrmtq" event={"ID":"b372d1b0-c82b-41bc-953b-121df9f17782","Type":"ContainerDied","Data":"4c51033aa4d4f68952164d3d924145c2f30c66a7aa0c88e81f8eac38c0108e08"} Nov 26 10:26:36 crc kubenswrapper[4940]: I1126 10:26:36.094341 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qrmtq" 
event={"ID":"b372d1b0-c82b-41bc-953b-121df9f17782","Type":"ContainerStarted","Data":"b6aaf9cc13ce07e4010c4220e37f4896c43150bab29b7a6cf63ee7c7f52639c5"} Nov 26 10:26:38 crc kubenswrapper[4940]: I1126 10:26:38.130903 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb72f" event={"ID":"a2e196c0-25f1-4d75-857a-5c2e9324fe65","Type":"ContainerStarted","Data":"c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9"} Nov 26 10:26:38 crc kubenswrapper[4940]: I1126 10:26:38.134485 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qrmtq" event={"ID":"b372d1b0-c82b-41bc-953b-121df9f17782","Type":"ContainerStarted","Data":"5b4987f5b50c4a00303d514e6e98fa500d937048d1c6b2dddaa2661f6e86d6e2"} Nov 26 10:26:39 crc kubenswrapper[4940]: I1126 10:26:39.153621 4940 generic.go:334] "Generic (PLEG): container finished" podID="9bd0dcd2-0d52-4560-81ee-c3ab9fe537be" containerID="0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1" exitCode=0 Nov 26 10:26:39 crc kubenswrapper[4940]: I1126 10:26:39.153888 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h2jmd" event={"ID":"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be","Type":"ContainerDied","Data":"0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1"} Nov 26 10:26:40 crc kubenswrapper[4940]: I1126 10:26:40.172434 4940 generic.go:334] "Generic (PLEG): container finished" podID="b372d1b0-c82b-41bc-953b-121df9f17782" containerID="5b4987f5b50c4a00303d514e6e98fa500d937048d1c6b2dddaa2661f6e86d6e2" exitCode=0 Nov 26 10:26:40 crc kubenswrapper[4940]: I1126 10:26:40.172552 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qrmtq" event={"ID":"b372d1b0-c82b-41bc-953b-121df9f17782","Type":"ContainerDied","Data":"5b4987f5b50c4a00303d514e6e98fa500d937048d1c6b2dddaa2661f6e86d6e2"} Nov 26 10:26:41 crc kubenswrapper[4940]: I1126 10:26:41.166351 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:26:41 crc kubenswrapper[4940]: E1126 10:26:41.173143 4940 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kbfvm_openshift-machine-config-operator(1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd)\"" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" podUID="1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd" Nov 26 10:26:41 crc kubenswrapper[4940]: I1126 10:26:41.220349 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h2jmd" event={"ID":"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be","Type":"ContainerStarted","Data":"38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71"} Nov 26 10:26:41 crc kubenswrapper[4940]: I1126 10:26:41.252655 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-h2jmd" podStartSLOduration=3.6409958590000002 podStartE2EDuration="9.25262007s" podCreationTimestamp="2025-11-26 10:26:32 +0000 UTC" firstStartedPulling="2025-11-26 10:26:34.054851342 +0000 UTC m=+12695.574992961" lastFinishedPulling="2025-11-26 10:26:39.666475523 +0000 UTC m=+12701.186617172" observedRunningTime="2025-11-26 10:26:41.24351366 +0000 UTC m=+12702.763655309" watchObservedRunningTime="2025-11-26 
10:26:41.25262007 +0000 UTC m=+12702.772761689" Nov 26 10:26:42 crc kubenswrapper[4940]: I1126 10:26:42.807144 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:42 crc kubenswrapper[4940]: I1126 10:26:42.807665 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:42 crc kubenswrapper[4940]: I1126 10:26:42.882464 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:43 crc kubenswrapper[4940]: I1126 10:26:43.251960 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qrmtq" event={"ID":"b372d1b0-c82b-41bc-953b-121df9f17782","Type":"ContainerStarted","Data":"dc81d75bf70910fa9199bfdcd3ddc30a8ce77a9c6f3fdf9d17360aa688808612"} Nov 26 10:26:43 crc kubenswrapper[4940]: I1126 10:26:43.272356 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qrmtq" podStartSLOduration=3.838076672 podStartE2EDuration="9.272338594s" podCreationTimestamp="2025-11-26 10:26:34 +0000 UTC" firstStartedPulling="2025-11-26 10:26:36.097699323 +0000 UTC m=+12697.617840982" lastFinishedPulling="2025-11-26 10:26:41.531961285 +0000 UTC m=+12703.052102904" observedRunningTime="2025-11-26 10:26:43.27099044 +0000 UTC m=+12704.791132079" watchObservedRunningTime="2025-11-26 10:26:43.272338594 +0000 UTC m=+12704.792480213" Nov 26 10:26:44 crc kubenswrapper[4940]: I1126 10:26:44.267448 4940 generic.go:334] "Generic (PLEG): container finished" podID="a2e196c0-25f1-4d75-857a-5c2e9324fe65" containerID="c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9" exitCode=0 Nov 26 10:26:44 crc kubenswrapper[4940]: I1126 10:26:44.267547 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb72f" event={"ID":"a2e196c0-25f1-4d75-857a-5c2e9324fe65","Type":"ContainerDied","Data":"c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9"} Nov 26 10:26:45 crc kubenswrapper[4940]: I1126 10:26:45.135787 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:45 crc kubenswrapper[4940]: I1126 10:26:45.136118 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:45 crc kubenswrapper[4940]: I1126 10:26:45.201492 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:45 crc kubenswrapper[4940]: I1126 10:26:45.283433 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb72f" event={"ID":"a2e196c0-25f1-4d75-857a-5c2e9324fe65","Type":"ContainerStarted","Data":"d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936"} Nov 26 10:26:52 crc kubenswrapper[4940]: I1126 10:26:52.882852 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:52 crc kubenswrapper[4940]: I1126 10:26:52.918490 4940 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nb72f" podStartSLOduration=10.083906226 podStartE2EDuration="18.918458204s" podCreationTimestamp="2025-11-26 10:26:34 +0000 UTC" 
firstStartedPulling="2025-11-26 10:26:36.087135426 +0000 UTC m=+12697.607277055" lastFinishedPulling="2025-11-26 10:26:44.921687404 +0000 UTC m=+12706.441829033" observedRunningTime="2025-11-26 10:26:45.342669739 +0000 UTC m=+12706.862811358" watchObservedRunningTime="2025-11-26 10:26:52.918458204 +0000 UTC m=+12714.438599853" Nov 26 10:26:52 crc kubenswrapper[4940]: I1126 10:26:52.951628 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h2jmd"] Nov 26 10:26:53 crc kubenswrapper[4940]: I1126 10:26:53.374535 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-h2jmd" podUID="9bd0dcd2-0d52-4560-81ee-c3ab9fe537be" containerName="registry-server" containerID="cri-o://38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71" gracePeriod=2 Nov 26 10:26:53 crc kubenswrapper[4940]: I1126 10:26:53.900986 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.034569 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-utilities\") pod \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.034720 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-catalog-content\") pod \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.034906 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89hkz\" (UniqueName: \"kubernetes.io/projected/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-kube-api-access-89hkz\") pod \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\" (UID: \"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be\") " Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.036219 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-utilities" (OuterVolumeSpecName: "utilities") pod "9bd0dcd2-0d52-4560-81ee-c3ab9fe537be" (UID: "9bd0dcd2-0d52-4560-81ee-c3ab9fe537be"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.044062 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.046402 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-kube-api-access-89hkz" (OuterVolumeSpecName: "kube-api-access-89hkz") pod "9bd0dcd2-0d52-4560-81ee-c3ab9fe537be" (UID: "9bd0dcd2-0d52-4560-81ee-c3ab9fe537be"). InnerVolumeSpecName "kube-api-access-89hkz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.098643 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9bd0dcd2-0d52-4560-81ee-c3ab9fe537be" (UID: "9bd0dcd2-0d52-4560-81ee-c3ab9fe537be"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.146731 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.146759 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89hkz\" (UniqueName: \"kubernetes.io/projected/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be-kube-api-access-89hkz\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.397980 4940 generic.go:334] "Generic (PLEG): container finished" podID="9bd0dcd2-0d52-4560-81ee-c3ab9fe537be" containerID="38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71" exitCode=0 Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.398184 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h2jmd" event={"ID":"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be","Type":"ContainerDied","Data":"38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71"} Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.398263 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h2jmd" event={"ID":"9bd0dcd2-0d52-4560-81ee-c3ab9fe537be","Type":"ContainerDied","Data":"a8ff1f7e3a5922a932b70f3ee83ece893d3df54ea057137fdb7402dc0486ab15"} Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.398311 4940 scope.go:117] "RemoveContainer" containerID="38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.400673 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h2jmd" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.441412 4940 scope.go:117] "RemoveContainer" containerID="0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.480175 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h2jmd"] Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.483683 4940 scope.go:117] "RemoveContainer" containerID="aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.494173 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-h2jmd"] Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.527275 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.527714 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.555139 4940 scope.go:117] "RemoveContainer" containerID="38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71" Nov 26 10:26:54 crc kubenswrapper[4940]: E1126 10:26:54.555675 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71\": container with ID starting with 38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71 not found: ID does not exist" containerID="38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.555721 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71"} err="failed to get container status \"38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71\": rpc error: code = NotFound desc = could not find container \"38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71\": container with ID starting with 38e56108ef79e4a76746f19dbc88c2d9502ca9d2be17ce702c7f118562b8ef71 not found: ID does not exist" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.555800 4940 scope.go:117] "RemoveContainer" containerID="0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1" Nov 26 10:26:54 crc kubenswrapper[4940]: E1126 10:26:54.556031 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1\": container with ID starting with 0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1 not found: ID does not exist" containerID="0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.556072 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1"} err="failed to get container status \"0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1\": rpc error: code = NotFound desc = could not find container \"0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1\": container with ID starting with 
0d3bc5bba9bda8d1c17f553b1e1e400f8b3c7b1785ad28e839d73f751a0011a1 not found: ID does not exist" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.556089 4940 scope.go:117] "RemoveContainer" containerID="aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26" Nov 26 10:26:54 crc kubenswrapper[4940]: E1126 10:26:54.556299 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26\": container with ID starting with aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26 not found: ID does not exist" containerID="aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.556319 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26"} err="failed to get container status \"aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26\": rpc error: code = NotFound desc = could not find container \"aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26\": container with ID starting with aca347c6c4f56a59c28f41088147f15bb88d8624861d028742148fb922377b26 not found: ID does not exist" Nov 26 10:26:54 crc kubenswrapper[4940]: I1126 10:26:54.581057 4940 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:55 crc kubenswrapper[4940]: I1126 10:26:55.180735 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bd0dcd2-0d52-4560-81ee-c3ab9fe537be" path="/var/lib/kubelet/pods/9bd0dcd2-0d52-4560-81ee-c3ab9fe537be/volumes" Nov 26 10:26:55 crc kubenswrapper[4940]: I1126 10:26:55.186245 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:55 crc kubenswrapper[4940]: I1126 10:26:55.468961 4940 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:56 crc kubenswrapper[4940]: I1126 10:26:56.167704 4940 scope.go:117] "RemoveContainer" containerID="ca74cafde66a6c46320ae760de9ed587fceb1b91105cb64b82d576daefc4fb92" Nov 26 10:26:56 crc kubenswrapper[4940]: I1126 10:26:56.925508 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nb72f"] Nov 26 10:26:57 crc kubenswrapper[4940]: I1126 10:26:57.468912 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kbfvm" event={"ID":"1ad7e56d-d7f6-421a-ba5a-d2d8d5b6f6fd","Type":"ContainerStarted","Data":"a7f9e9c036fb961994efb118644d2b242700c68419dec2bfb71e04c596dae294"} Nov 26 10:26:58 crc kubenswrapper[4940]: I1126 10:26:58.320716 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qrmtq"] Nov 26 10:26:58 crc kubenswrapper[4940]: I1126 10:26:58.321207 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qrmtq" podUID="b372d1b0-c82b-41bc-953b-121df9f17782" containerName="registry-server" containerID="cri-o://dc81d75bf70910fa9199bfdcd3ddc30a8ce77a9c6f3fdf9d17360aa688808612" gracePeriod=2 Nov 26 10:26:58 crc kubenswrapper[4940]: I1126 10:26:58.515730 4940 generic.go:334] "Generic (PLEG): container finished" podID="b372d1b0-c82b-41bc-953b-121df9f17782" 
containerID="dc81d75bf70910fa9199bfdcd3ddc30a8ce77a9c6f3fdf9d17360aa688808612" exitCode=0 Nov 26 10:26:58 crc kubenswrapper[4940]: I1126 10:26:58.516007 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qrmtq" event={"ID":"b372d1b0-c82b-41bc-953b-121df9f17782","Type":"ContainerDied","Data":"dc81d75bf70910fa9199bfdcd3ddc30a8ce77a9c6f3fdf9d17360aa688808612"} Nov 26 10:26:58 crc kubenswrapper[4940]: I1126 10:26:58.516189 4940 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nb72f" podUID="a2e196c0-25f1-4d75-857a-5c2e9324fe65" containerName="registry-server" containerID="cri-o://d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936" gracePeriod=2 Nov 26 10:26:58 crc kubenswrapper[4940]: I1126 10:26:58.921685 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.050067 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.068914 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgxh2\" (UniqueName: \"kubernetes.io/projected/b372d1b0-c82b-41bc-953b-121df9f17782-kube-api-access-xgxh2\") pod \"b372d1b0-c82b-41bc-953b-121df9f17782\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.069422 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-utilities\") pod \"b372d1b0-c82b-41bc-953b-121df9f17782\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.069502 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-catalog-content\") pod \"b372d1b0-c82b-41bc-953b-121df9f17782\" (UID: \"b372d1b0-c82b-41bc-953b-121df9f17782\") " Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.070591 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-utilities" (OuterVolumeSpecName: "utilities") pod "b372d1b0-c82b-41bc-953b-121df9f17782" (UID: "b372d1b0-c82b-41bc-953b-121df9f17782"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.078285 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b372d1b0-c82b-41bc-953b-121df9f17782-kube-api-access-xgxh2" (OuterVolumeSpecName: "kube-api-access-xgxh2") pod "b372d1b0-c82b-41bc-953b-121df9f17782" (UID: "b372d1b0-c82b-41bc-953b-121df9f17782"). InnerVolumeSpecName "kube-api-access-xgxh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.133174 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b372d1b0-c82b-41bc-953b-121df9f17782" (UID: "b372d1b0-c82b-41bc-953b-121df9f17782"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.171773 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pbmk\" (UniqueName: \"kubernetes.io/projected/a2e196c0-25f1-4d75-857a-5c2e9324fe65-kube-api-access-2pbmk\") pod \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.171887 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-utilities\") pod \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.171990 4940 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-catalog-content\") pod \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\" (UID: \"a2e196c0-25f1-4d75-857a-5c2e9324fe65\") " Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.172590 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.172606 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgxh2\" (UniqueName: \"kubernetes.io/projected/b372d1b0-c82b-41bc-953b-121df9f17782-kube-api-access-xgxh2\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.172617 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b372d1b0-c82b-41bc-953b-121df9f17782-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.172934 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-utilities" (OuterVolumeSpecName: "utilities") pod "a2e196c0-25f1-4d75-857a-5c2e9324fe65" (UID: "a2e196c0-25f1-4d75-857a-5c2e9324fe65"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.176843 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2e196c0-25f1-4d75-857a-5c2e9324fe65-kube-api-access-2pbmk" (OuterVolumeSpecName: "kube-api-access-2pbmk") pod "a2e196c0-25f1-4d75-857a-5c2e9324fe65" (UID: "a2e196c0-25f1-4d75-857a-5c2e9324fe65"). InnerVolumeSpecName "kube-api-access-2pbmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.252372 4940 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2e196c0-25f1-4d75-857a-5c2e9324fe65" (UID: "a2e196c0-25f1-4d75-857a-5c2e9324fe65"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.282826 4940 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pbmk\" (UniqueName: \"kubernetes.io/projected/a2e196c0-25f1-4d75-857a-5c2e9324fe65-kube-api-access-2pbmk\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.282912 4940 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.282932 4940 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2e196c0-25f1-4d75-857a-5c2e9324fe65-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.534008 4940 generic.go:334] "Generic (PLEG): container finished" podID="a2e196c0-25f1-4d75-857a-5c2e9324fe65" containerID="d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936" exitCode=0 Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.534208 4940 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nb72f" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.534228 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb72f" event={"ID":"a2e196c0-25f1-4d75-857a-5c2e9324fe65","Type":"ContainerDied","Data":"d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936"} Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.534322 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nb72f" event={"ID":"a2e196c0-25f1-4d75-857a-5c2e9324fe65","Type":"ContainerDied","Data":"2e51d0c4b0c66366d370c010b995661c045b3b6b6e066c93c4c0b88e4b73181c"} Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.534352 4940 scope.go:117] "RemoveContainer" containerID="d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.546419 4940 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qrmtq" event={"ID":"b372d1b0-c82b-41bc-953b-121df9f17782","Type":"ContainerDied","Data":"b6aaf9cc13ce07e4010c4220e37f4896c43150bab29b7a6cf63ee7c7f52639c5"} Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.546633 4940 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qrmtq" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.573792 4940 scope.go:117] "RemoveContainer" containerID="c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.587474 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qrmtq"] Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.612154 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qrmtq"] Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.615260 4940 scope.go:117] "RemoveContainer" containerID="e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.620956 4940 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nb72f"] Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.633182 4940 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nb72f"] Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.666007 4940 scope.go:117] "RemoveContainer" containerID="d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936" Nov 26 10:26:59 crc kubenswrapper[4940]: E1126 10:26:59.666635 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936\": container with ID starting with d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936 not found: ID does not exist" containerID="d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.666691 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936"} err="failed to get container status \"d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936\": rpc error: code = NotFound desc = could not find container \"d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936\": container with ID starting with d278fca26c29ff5ceb3c962a5d12f707fed6cf04bcffb9fda2ede7c0ef191936 not found: ID does not exist" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.666728 4940 scope.go:117] "RemoveContainer" containerID="c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9" Nov 26 10:26:59 crc kubenswrapper[4940]: E1126 10:26:59.667088 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9\": container with ID starting with c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9 not found: ID does not exist" containerID="c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.667126 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9"} err="failed to get container status \"c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9\": rpc error: code = NotFound desc = could not find container \"c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9\": container with ID starting with 
c0231e7ac898c26a8ae213da259bf761c957647aba5d20966f2b94ba084b1ce9 not found: ID does not exist" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.667151 4940 scope.go:117] "RemoveContainer" containerID="e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0" Nov 26 10:26:59 crc kubenswrapper[4940]: E1126 10:26:59.667815 4940 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0\": container with ID starting with e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0 not found: ID does not exist" containerID="e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.667852 4940 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0"} err="failed to get container status \"e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0\": rpc error: code = NotFound desc = could not find container \"e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0\": container with ID starting with e36ea88914b9d3142dec8bc579063d399468d54391512ad799e16d5501efd2f0 not found: ID does not exist" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.667877 4940 scope.go:117] "RemoveContainer" containerID="dc81d75bf70910fa9199bfdcd3ddc30a8ce77a9c6f3fdf9d17360aa688808612" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.711195 4940 scope.go:117] "RemoveContainer" containerID="5b4987f5b50c4a00303d514e6e98fa500d937048d1c6b2dddaa2661f6e86d6e2" Nov 26 10:26:59 crc kubenswrapper[4940]: I1126 10:26:59.753543 4940 scope.go:117] "RemoveContainer" containerID="4c51033aa4d4f68952164d3d924145c2f30c66a7aa0c88e81f8eac38c0108e08" Nov 26 10:27:01 crc kubenswrapper[4940]: I1126 10:27:01.178810 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2e196c0-25f1-4d75-857a-5c2e9324fe65" path="/var/lib/kubelet/pods/a2e196c0-25f1-4d75-857a-5c2e9324fe65/volumes" Nov 26 10:27:01 crc kubenswrapper[4940]: I1126 10:27:01.180239 4940 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b372d1b0-c82b-41bc-953b-121df9f17782" path="/var/lib/kubelet/pods/b372d1b0-c82b-41bc-953b-121df9f17782/volumes" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111553163024446 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111553164017364 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111521157016504 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111521160015446 5ustar corecore